Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:23

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSScoreCPUAArch64
0007  *
0008  * @brief AArch64 architecture context switch implementation.
0009  */
0010 
0011 /*
0012  * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
0013  * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
0014  *
0015  * Redistribution and use in source and binary forms, with or without
0016  * modification, are permitted provided that the following conditions
0017  * are met:
0018  * 1. Redistributions of source code must retain the above copyright
0019  *    notice, this list of conditions and the following disclaimer.
0020  * 2. Redistributions in binary form must reproduce the above copyright
0021  *    notice, this list of conditions and the following disclaimer in the
0022  *    documentation and/or other materials provided with the distribution.
0023  *
0024  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0025  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0026  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0027  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0028  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0029  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0030  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0031  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0032  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0033  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0034  * POSSIBILITY OF SUCH DAMAGE.
0035  */
0036 
0037 #ifdef HAVE_CONFIG_H
0038 #include "config.h"
0039 #endif
0040 
0041 #include <rtems/asm.h>
0042 
0043         .text
0044 
0045 /*
 *  void _CPU_Context_switch( run_context, heir_context )
 *  void _CPU_Context_switch_no_return( run_context, heir_context )
0048  *
 *  This routine performs a normal non-FP context switch.  Only the
 *  registers the AAPCS64 ABI declares callee-saved need to be part of
 *  the context: x19-x28, fp (x29), lr (x30), sp and, when the VFP
 *  multilib is enabled, the callee-saved low halves d8-d15.
0050  *
0051  *  X0 = run_context    X1 = heir_context
0052  *
0053  *  This function copies the current registers to where x0 points, then
0054  *  restores the ones from where x1 points.
 *
 *  _CPU_Context_restore() reuses the restore half of this routine by
 *  branching to the .L_restore label below.
0055  *
0056  */
0057 
0058 DEFINE_FUNCTION_AARCH64(_CPU_Context_switch)
    /* _CPU_Context_switch_no_return is an alias for the same entry point */
0059     .globl  _CPU_Context_switch_no_return
0060     .set    _CPU_Context_switch_no_return, _CPU_Context_switch
0061 #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
/*
 * Sanitize inputs for ILP32 ABI: writing the 32-bit w view zero-extends
 * into the upper 32 bits, clearing any stale high bits in the 32-bit
 * pointer arguments.
 */
0063     mov w0, w0
0064     mov w1, w1
  /*
   * reg_2 selects the register width GET_SELF_CPU_CONTROL uses for the
   * per-CPU pointer; SMP needs the full x view — NOTE(review): presumably
   * because the SMP heir-update path below does 64-bit arithmetic on it.
   */
0065   #ifdef RTEMS_SMP
0066     #define reg_2 x2
0067   #else
0068     #define reg_2 w2
0069   #endif
0070 #else
0071 #define reg_2 x2
0072 #endif
0073 
/* Start saving context; x2 = per-CPU control of this processor */
0075     GET_SELF_CPU_CONTROL    reg_2
    /* x3 = ISR dispatch disable level of the executing thread */
0076     ldr w3, [x2, #PER_CPU_ISR_DISPATCH_DISABLE]
0077 
    /* Save the AAPCS64 callee-saved integer registers */
0078     stp x19, x20, [x0]
0079     stp x21, x22, [x0, #0x10]
0080     stp x23, x24, [x0, #0x20]
0081     stp x25, x26, [x0, #0x30]
0082     stp x27, x28, [x0, #0x40]
0083     stp fp,  lr,  [x0, #0x50]
    /* sp cannot be the transfer register of str, so move it through x4 */
0084     mov x4,  sp
0085     str x4,  [x0, #0x60]
0086 
0087 #ifdef AARCH64_MULTILIB_VFP
    /* Save the callee-saved low halves (d8-d15) of the FP/SIMD registers */
0088     add x5, x0, #AARCH64_CONTEXT_CONTROL_D8_OFFSET
0089     stp d8,  d9,  [x5]
0090     stp d10, d11, [x5, #0x10]
0091     stp d12, d13, [x5, #0x20]
0092     stp d14, d15, [x5, #0x30]
0093 #endif
0094 
0095     str x3, [x0, #AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE]
0096 
0097 #ifdef RTEMS_SMP
0098     /*
0099      * The executing thread no longer executes on this processor.  Switch
0100      * the stack to the temporary interrupt stack of this processor.  Mark
0101      * the context of the executing thread as not executing.
0102      */
0103     dmb SY
0104     add sp, x2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE)
0105     mov x3, #0
0106     strb    w3, [x0, #AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
0107 
0108 .L_check_is_executing:
0109 
    /*
     * Check the is-executing indicator of the heir context.  Non-zero
     * means the heir is still running on another processor, so spin
     * (re-reading a potentially newer heir) until that processor clears
     * the flag in its own save path above.
     */
0111     add x3, x1, #AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
    /* load-acquire exclusive; pairs with the store-release below */
0112     ldaxrb  w4, [x3]
0113     cmp x4, #0
0114     bne .L_get_potential_new_heir
0115 
0116     /* Try to update the is executing indicator of the heir context */
0117     mov x4, #1
    /* w5 = 0 on success; a lost exclusive reservation forces a retry */
0118     stlxrb  w5, w4, [x3]
0119     cmp x5, #0
0120     bne .L_get_potential_new_heir
0121     dmb SY
0122 #endif
0123 
/* Start restoring context; expects x1 = heir context, x2 = per-CPU control */
0125 .L_restore:
0126 #if !defined(RTEMS_SMP) && defined(AARCH64_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE)
    /* Drop any exclusive-monitor reservation inherited from the old thread */
0127     clrex
0128 #endif
0129 
0130     ldr x3, [x1, #AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET]
0131 
0132     ldr x4, [x1, #AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE]
0133 
0134 #ifdef AARCH64_MULTILIB_VFP
    /* Restore the callee-saved low halves (d8-d15) of the FP/SIMD registers */
0135     add x5, x1, #AARCH64_CONTEXT_CONTROL_D8_OFFSET
0136     ldp d8,  d9,  [x5]
0137     ldp d10, d11, [x5, #0x10]
0138     ldp d12, d13, [x5, #0x20]
0139     ldp d14, d15, [x5, #0x30]
0140 #endif
0141 
    /* Install the heir's thread ID in the EL0 thread ID register */
0142     msr TPIDR_EL0, x3
0143 
    /* Restore the heir's ISR dispatch disable level on this processor */
0144     str w4, [x2, #PER_CPU_ISR_DISPATCH_DISABLE]
0145 
    /* Restore the callee-saved integer registers and return to the heir */
0146     ldp x19, x20, [x1]
0147     ldp x21, x22, [x1, #0x10]
0148     ldp x23, x24, [x1, #0x20]
0149     ldp x25, x26, [x1, #0x30]
0150     ldp x27, x28, [x1, #0x40]
0151     ldp fp,  lr,  [x1, #0x50]
0152     ldr x4,  [x1, #0x60]
0153     mov sp,  x4
0154     ret
0155 
0156 /*
0157  *  void _CPU_Context_restore( new_context )
0158  *
0159  *  This function restores the registers from where x0 points.
0160  *  It must match _CPU_Context_switch()
 *
 *  It does not save any state and does not return to the caller; it
 *  simply tail-branches into the restore half of _CPU_Context_switch().
0161  *
0162  */
0163 DEFINE_FUNCTION_AARCH64(_CPU_Context_restore)
0164 #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
/* Sanitize input for ILP32 ABI (zero-extend the 32-bit pointer argument) */
0166     mov w0, w0
0167 #endif
0168 
    /* .L_restore expects the heir context in x1 and per-CPU control in x2 */
0169         mov     x1, x0
0170     GET_SELF_CPU_CONTROL    reg_2
0171         b       .L_restore
0172 
0173 #ifdef RTEMS_SMP
/*
 * SMP retry path of _CPU_Context_switch(): the heir was still executing
 * elsewhere (or the exclusive store failed).  While spinning, the
 * scheduler may have selected a new heir for this processor, so re-read
 * it before checking the is-executing indicator again.
 *
 * Expects: x1 = current heir context, x2 = per-CPU control.
 */
0174 .L_get_potential_new_heir:
0175 
0176     /* We may have a new heir */
0177 
    /* Read the executing and heir thread control block pointers */
0179 #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
0180     ldr w4, [x2, #PER_CPU_OFFSET_EXECUTING]
0181     ldr w5, [x2, #PER_CPU_OFFSET_HEIR]
0182 #else
0183     ldr x4, [x2, #PER_CPU_OFFSET_EXECUTING]
0184     ldr x5, [x2, #PER_CPU_OFFSET_HEIR]
0185 #endif
0186 
0187     /*
0188      * Update the executing only if necessary to avoid cache line
0189      * monopolization.
0190      */
0191     cmp x4, x5
0192     beq .L_check_is_executing
0193 
    /*
     * Calculate the heir context pointer: x1 - x4 is the offset of the
     * context within the thread control block, so adding it to the new
     * heir TCB (x5) yields the new heir context pointer.
     */
0195     sub x4, x1, x4
0196     add x1, x5, x4
0197 
    /* Update the executing thread of this processor to the new heir */
0199 #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
0200     str w5, [x2, #PER_CPU_OFFSET_EXECUTING]
0201 #else
0202     str x5, [x2, #PER_CPU_OFFSET_EXECUTING]
0203 #endif
0204 
0205     b   .L_check_is_executing
0206 
/*
 *  void _AArch64_Start_multitasking( Context_Control *heir ) -- SMP only
 *
 *  Starts multitasking on this processor by restoring the context of the
 *  first heir thread.  Does not return; it joins the heir hand-off loop
 *  of _CPU_Context_switch() at .L_check_is_executing.
 *
 *  X0 = heir context
 */
0207 DEFINE_FUNCTION_AARCH64(_AArch64_Start_multitasking)
0208 #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
    /* Sanitize input for ILP32 ABI (zero-extend the 32-bit pointer) */
0210     mov w0, w0
0211 #endif
0212 
    /* The hand-off loop expects the heir context in x1, per-CPU in x2 */
0213     mov x1, x0
0214     GET_SELF_CPU_CONTROL    reg_2
0215 
0216     /* Switch the stack to the temporary interrupt stack of this processor */
0217     add sp, x2, #(PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE)
0218 
    /* Enable interrupts: clear the DAIF I bit (IRQ mask) */
0220     msr DAIFClr, #0x2
0221 
0222     b   .L_check_is_executing
0223 #endif