/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSBSPsAArch64Shared
 *
 * @brief Boot and system start code.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rtems/asm.h>
#include <rtems/score/percpu.h>

#include <bspopts.h>

  /* Global symbols */
  .globl _start
  .section ".bsp_start_text", "ax"
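  /* Section flags: "a" = allocatable, "x" = executable */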

/* Start entry */

_start:

  /*
   * We do not save the context since we do not return to the boot loader,
   * but we preserve x1 and x2 to allow access to the boot loader
   * parameters.
   */
#ifndef BSP_START_NEEDS_REGISTER_INITIALIZATION
  mov x5, x1    /* machine type number or ~0 for DT boot */
  mov x6, x2    /* physical address of ATAGs or DTB */
#else /* BSP_START_NEEDS_REGISTER_INITIALIZATION */
  /*
   * This block is dead code: no AArch64 targets currently require it. It
   * might be needed for hardware simulations or for future processor
   * variants with lock-step cores.
   */
  mov x0, XZR
  mov x1, XZR
  mov x2, XZR
  mov x3, XZR
  mov x4, XZR
  mov x5, XZR
  mov x6, XZR
  mov x7, XZR
  mov x8, XZR
  mov x9, XZR
  mov x10, XZR
  mov x11, XZR
  mov x12, XZR
  mov x13, XZR
  mov x14, XZR
  mov x15, XZR
  mov x16, XZR
  mov x17, XZR
  mov x18, XZR
  mov x19, XZR
  mov x20, XZR
  mov x21, XZR
  mov x22, XZR
  mov x23, XZR
  mov x24, XZR
  mov x25, XZR
  mov x26, XZR
  mov x27, XZR
  mov x28, XZR
  mov x29, XZR
  mov x30, XZR
#ifdef AARCH64_MULTILIB_VFP
  msr CPTR_EL3, XZR
  msr CPTR_EL2, XZR
  fmov d0, XZR
  fmov d1, XZR
  fmov d2, XZR
  fmov d3, XZR
  fmov d4, XZR
  fmov d5, XZR
  fmov d6, XZR
  fmov d7, XZR
  fmov d8, XZR
  fmov d9, XZR
  fmov d10, XZR
  fmov d11, XZR
  fmov d12, XZR
  fmov d13, XZR
  fmov d14, XZR
  fmov d15, XZR
  fmov d16, XZR
  fmov d17, XZR
  fmov d18, XZR
  fmov d19, XZR
  fmov d20, XZR
  fmov d21, XZR
  fmov d22, XZR
  fmov d23, XZR
  fmov d24, XZR
  fmov d25, XZR
  fmov d26, XZR
  fmov d27, XZR
  fmov d28, XZR
  fmov d29, XZR
  fmov d30, XZR
  fmov d31, XZR
#endif /* AARCH64_MULTILIB_VFP */
#endif /* BSP_START_NEEDS_REGISTER_INITIALIZATION */

  /* Initialize SCTLR_EL1 */
  mov x0, XZR
#if defined(RTEMS_DEBUG)
  /* Enable stack alignment checking (SCTLR_EL1.SA, bit 3) */
  orr x0, x0, #(1<<3)
#endif
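  /*
   * With SCTLR_EL1 otherwise zero, the EL1 MMU, data cache, and
   * instruction cache start out disabled; they are enabled later during
   * system initialization.
   */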
  msr SCTLR_EL1, x0

#if defined(BSP_START_ENABLE_EL2_START_SUPPORT) || \
  defined(BSP_START_ENABLE_EL3_START_SUPPORT)
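  /* CurrentEL reports the current exception level in bits [3:2] */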
  mrs x0, CurrentEL
  cmp x0, #(1<<2)
  b.eq .L_el1_start
#endif

#if defined(BSP_START_ENABLE_EL3_START_SUPPORT)
  cmp x0, #(2<<2)
  b.eq .L_el2_start

.L_el3_start:
  /*
   * Before leaving the Secure World, we need to initialize the GIC. We do
   * that here in an early stack context in EL3. This will NOT work for
   * secondary core boot! We assume that, if any core starts in EL3, it is
   * only the primary boot core. On real hardware, we usually run on top of
   * trusted firmware and thus do not boot in EL3. QEMU fakes it for us: it
   * starts the primary core in EL3, and secondary cores are brought up in
   * EL1NS as expected.
   */
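  /*
   * ldr =symbol loads the address of the symbol from a literal pool; the
   * w registers are used for ILP32, where pointers are 32 bits wide.
   */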
  #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
    ldr w1, =_ISR_Stack_size
    ldr w2, =_ISR_Stack_area_begin
  #else
    ldr x1, =_ISR_Stack_size
    ldr x2, =_ISR_Stack_area_begin
  #endif
  add x3, x1, x2
  /* Use SP0 for the early init stack context at EL3 */
  msr spsel, #0
  mov sp, x3

  /*
   * Invoke start hook 0. We do not set up exception handling here, so
   * this hook had better behave.
   */
  bl bsp_start_hook_0

  /* Drop from EL3 to EL2 */

  /* Initialize HCR_EL2 and SCTLR_EL2 */
  msr HCR_EL2, XZR
  msr SCTLR_EL2, XZR
  /* Set the EL2 execution state via SCR_EL3 */
  mrs x0, SCR_EL3
  /* Set EL2 to AArch64 (SCR_EL3.RW, bit 10) */
  orr x0, x0, #(1<<10)
  /* Mark the lower exception levels as Non-secure (SCR_EL3.NS, bit 0) */
  orr x0, x0, #1
  msr SCR_EL3, x0

  /* Set EL2h mode for eret (SPSR_EL3.M[3:0] = 0b1001, EL2 with SP_EL2) */
  mov x0, #0b01001
  msr SPSR_EL3, x0

  /* Set the EL2 entry point */
  adr x0, .L_el2_start
  msr ELR_EL3, x0
  eret
#endif

#if defined(BSP_START_ENABLE_EL2_START_SUPPORT) || \
  defined(BSP_START_ENABLE_EL3_START_SUPPORT)
.L_el2_start:
  /* Drop from EL2 to EL1 */

  /* Configure HCR_EL2 */
  mrs x0, HCR_EL2
  /* Set the EL1 execution state to AArch64 (HCR_EL2.RW, bit 31) */
  orr x0, x0, #(1<<31)
  /* Disable ID register traps (HCR_EL2.TID0-TID3, bits 15-18) */
  bic x0, x0, #(1<<15)
  bic x0, x0, #(1<<16)
  bic x0, x0, #(1<<17)
  bic x0, x0, #(1<<18)
  msr HCR_EL2, x0

  /* Set EL1h mode for eret (SPSR_EL2.M[3:0] = 0b0101, EL1 with SP_EL1) */
  mov x0, #0b00101
  msr SPSR_EL2, x0

  /* Set the EL1 entry point */
  adr x0, .L_el1_start
  msr ELR_EL2, x0
  eret

.L_el1_start:
#endif

#ifdef RTEMS_SMP
  bl _AArch64_Get_current_processor_for_system_start
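  /* x0 now contains the index of the current processor */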

  /*
   * Check that this is a configured processor.  If not, then there is
   * not much that can be done since we do not have a stack available for
   * this processor.  Just loop forever in this case.
   */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  ldr w1, =_SMP_Processor_configured_maximum
#else
  ldr x1, =_SMP_Processor_configured_maximum
#endif
  ldr w1, [x1]
  cmp x1, x0
  bgt .Lconfigured_processor
.Linvalid_processor_wait_for_ever:
  wfe
  b .Linvalid_processor_wait_for_ever
.Lconfigured_processor:

  /*
   * Get the current per-CPU control and store it in the EL1 software
   * thread ID register (TPIDR_EL1).
   */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  ldr w1, =_Per_CPU_Information
#else
  ldr x1, =_Per_CPU_Information
#endif
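  /* x1 = _Per_CPU_Information + processor index * sizeof(Per_CPU_Control) */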
  add x1, x1, x0, lsl #PER_CPU_CONTROL_SIZE_LOG2
  msr TPIDR_EL1, x1

#endif

  /* Calculate interrupt stack area end for current processor */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  ldr w1, =_ISR_Stack_size
#else
  ldr x1, =_ISR_Stack_size
#endif
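  /*
   * In SMP configurations, each processor gets its own slice of the ISR
   * stack area: the end of slice n is
   * _ISR_Stack_area_begin + (n + 1) * _ISR_Stack_size.
   */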
#ifdef RTEMS_SMP
  add x3, x0, #1
  mul x1, x1, x3
#endif
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  ldr w2, =_ISR_Stack_area_begin
#else
  ldr x2, =_ISR_Stack_area_begin
#endif
  add x3, x1, x2

  /* Mask IRQs and debug exceptions (DAIFSet: D = bit 3, I = bit 1) */
  msr DAIFSet, #0xa

#ifdef BSP_START_NEEDS_REGISTER_INITIALIZATION
  mov x8, XZR
  mov x9, XZR
  mov x10, XZR
  mov x11, XZR
  mov x12, XZR
  mov x13, XZR
  mov x14, XZR
  mov x15, XZR
#endif

  /*
   * SPx is the stack pointer corresponding to the current exception level.
   * Normal operation for RTEMS on AArch64 uses SPx and runs at EL1.
   * Exception operation (synchronous exceptions, IRQ, FIQ, SError) uses
   * SP0.
   */
#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  ldr w1, =bsp_stack_exception_size
#else
  ldr x1, =bsp_stack_exception_size
#endif
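  /*
   * The top bsp_stack_exception_size bytes of this processor's stack
   * slice serve as the SP0 exception stack; the SP1 stack for normal
   * operation sits directly below it.
   */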
  /* Switch to SP0 and set exception stack */
  msr spsel, #0
  mov sp, x3
  /* Switch back to SPx for normal operation */
  msr spsel, #1
  sub x3, x3, x1

  /* Set SP1 stack used for normal operation */
  mov sp, x3

  /* Stay in EL1 mode */

#ifdef AARCH64_MULTILIB_VFP
#ifdef AARCH64_MULTILIB_HAS_CPACR
  /* Read CPACR */
  mrs x0, CPACR_EL1

  /* Enable EL1 access to the FP/SIMD unit (CPACR_EL1.FPEN, bit 20) */
  orr x0, x0, #(1 << 20)

  /* Write CPACR */
  msr CPACR_EL1, x0
  isb
#endif

  /* The FPU does not need to be enabled on AArch64 */

  /*
   * Ensure FPU exception traps are disabled by default: clear the IOE,
   * DZE, OFE, UFE, and IXE trap enables (FPCR bits 8-12) and the input
   * denormal trap enable IDE (FPCR bit 15).
   */
  mrs x0, FPCR
  bic x0, x0, #((1 << 8) | (1 << 9) | (1 << 10) | (1 << 11) | (1 << 12))
  bic x0, x0, #(1 << 15)
  msr FPCR, x0

#endif /* AARCH64_MULTILIB_VFP */

  /* Branch to start hook 1 */
  bl bsp_start_hook_1

  /* Branch to boot_card() with a NULL command line; boot_card() does not
     return */
  mov x0, #0
  bl boot_card