/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief This source file contains the implementation of
 *   _AArch64_Exception_interrupt_nest(),
 *   _AArch64_Exception_interrupt_no_nest(), and
 *   _AArch64_Exception_thread_dispatch().
 *
 * This file implements the SP0 and SPx interrupt exception handlers to
 * deal with nested and non-nested interrupts.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

.globl  _AArch64_Exception_interrupt_no_nest
.globl  _AArch64_Exception_interrupt_nest
.globl  _AArch64_Exception_thread_dispatch

#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  #ifdef RTEMS_SMP
    #define SELF_CPU_CONTROL_GET_REG x19
  #else
    #define SELF_CPU_CONTROL_GET_REG w19
  #endif
#else
  #define SELF_CPU_CONTROL_GET_REG x19
#endif
#define SELF_CPU_CONTROL x19
#define NON_VOLATILE_SCRATCH x20
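
/*
 * x19 and x20 are callee-saved registers used to hold the per-CPU control
 * pointer and a saved DAIF value across calls.  The ILP32 non-SMP
 * configuration loads the per-CPU pointer through w19, presumably because
 * pointers are only 32 bits wide in that ABI.
 */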

/* It's understood that CPU state is saved prior to and restored after this */
/*
 * NOTE: This function does not follow the AArch64 procedure call specification
 * because all relevant state is known to be saved in the interrupt context,
 * hence the blind usage of x19, x20, and x21
 */
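/*
 * The handler below increments the ISR nest and thread dispatch disable
 * levels, calls bsp_interrupt_dispatch(), decrements the levels again, and
 * returns a non-zero value in x0 if the caller should skip the thread
 * dispatch.
 */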
.AArch64_Interrupt_Handler:
/* Get per-CPU control of current processor */
    GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG

/* Increment interrupt nest and thread dispatch disable level */
    ldr w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
    ldr w3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
    add w2, w2, #1
    add w3, w3, #1
    str w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
    str w3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

/* Save LR */
    mov x21, LR

/* Call BSP dependent interrupt dispatcher */
    bl  bsp_interrupt_dispatch

/* Restore LR */
    mov LR, x21

/* Load some per-CPU variables */
    ldr w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
    ldrb    w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
    ldr w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
    ldr w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

/* Decrement levels and determine thread dispatch state */
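/*
 * The value computed in w1 below is zero only if the thread dispatch
 * disable level drops to zero, a thread dispatch is needed, and ISR
 * dispatch is not disabled; any other combination yields a non-zero
 * "skip thread dispatch" result.
 */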
    eor w1, w1, w0
    sub w0, w0, #1
    orr w1, w1, w0
    orr w1, w1, w2
    sub w3, w3, #1

/* Store thread dispatch disable and ISR nest levels */
    str w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
    str w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

/* Return should_skip_thread_dispatch in x0 */
    mov x0, x1
/* Return from handler */
    ret

/* NOTE: This function does not follow the AArch64 procedure call specification
 * because all relevant state is known to be saved in the interrupt context,
 * hence the blind usage of x19, x20, and x21 */
_AArch64_Exception_thread_dispatch:
/* Get per-CPU control of current processor */
    GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG

/* Save the current interrupt mask (DAIF) for the thread dispatch */
    mrs NON_VOLATILE_SCRATCH, DAIF

.Ldo_thread_dispatch:

/* Set ISR dispatch disable and thread dispatch disable level to one */
    mov w0, #1
    str w0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
    str w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

/* Save LR */
    mov x21, LR

/* Call _Thread_Do_dispatch(), this function will enable interrupts */
    mov x0, SELF_CPU_CONTROL
    mov x1, NON_VOLATILE_SCRATCH
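/* Clear the IRQ mask bit (DAIF.I, 0x80) so the interrupt level passed to
 * _Thread_Do_dispatch() in x1 has IRQs enabled */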
    mov x2, #0x80
    bic x1, x1, x2
    bl  _Thread_Do_dispatch

/* Restore LR */
    mov LR, x21

/* Disable interrupts */
    msr DAIF, NON_VOLATILE_SCRATCH

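/*
 * On SMP configurations the thread may resume on a different processor
 * after _Thread_Do_dispatch(), so the per-CPU control must be fetched
 * again.
 */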
#ifdef RTEMS_SMP
    GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG
#endif

/* Check if we have to do the thread dispatch again */
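/*
 * A new dispatch request may have been raised between the return of
 * _Thread_Do_dispatch() and the masking of interrupts above.
 */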
    ldrb    w0, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
    cmp w0, #0
    bne .Ldo_thread_dispatch

/* We are done with thread dispatching */
    mov w0, #0
    str w0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]

/* Return from thread dispatch */
    ret

/*
 * Must save corruptible registers and non-corruptible registers expected to be
 * used, x0 and lr expected to be already saved on the stack
 */
.macro  push_interrupt_context
/*
 * Push x1-x21 on to the stack, need 19-21 because they're modified without
 * obeying PCS
 */
    stp lr,     x1, [sp, #-0x10]!
    stp x2,     x3, [sp, #-0x10]!
    stp x4,     x5, [sp, #-0x10]!
    stp x6,     x7, [sp, #-0x10]!
    stp x8,     x9, [sp, #-0x10]!
    stp x10,    x11,    [sp, #-0x10]!
    stp x12,    x13,    [sp, #-0x10]!
    stp x14,    x15,    [sp, #-0x10]!
    stp x16,    x17,    [sp, #-0x10]!
    stp x18,    x19,    [sp, #-0x10]!
    stp x20,    x21,    [sp, #-0x10]!
/*
 * Push q0-q31 on to the stack, need everything because parts of every register
 * are volatile/corruptible
 */
    stp q0,     q1, [sp, #-0x20]!
    stp q2,     q3, [sp, #-0x20]!
    stp q4,     q5, [sp, #-0x20]!
    stp q6,     q7, [sp, #-0x20]!
    stp q8,     q9, [sp, #-0x20]!
    stp q10,    q11,    [sp, #-0x20]!
    stp q12,    q13,    [sp, #-0x20]!
    stp q14,    q15,    [sp, #-0x20]!
    stp q16,    q17,    [sp, #-0x20]!
    stp q18,    q19,    [sp, #-0x20]!
    stp q20,    q21,    [sp, #-0x20]!
    stp q22,    q23,    [sp, #-0x20]!
    stp q24,    q25,    [sp, #-0x20]!
    stp q26,    q27,    [sp, #-0x20]!
    stp q28,    q29,    [sp, #-0x20]!
    stp q30,    q31,    [sp, #-0x20]!
/* Get exception LR for PC and spsr */
    mrs x0, ELR_EL1
    mrs x1, SPSR_EL1
/* Push pc and spsr */
    stp x0,     x1, [sp, #-0x10]!
/* Get fpcr and fpsr */
    mrs x0, FPSR
    mrs x1, FPCR
/* Push fpcr and fpsr */
    stp x0,     x1, [sp, #-0x10]!
.endm

/* Must match inverse order of push_interrupt_context */
.macro pop_interrupt_context
/* Pop fpcr and fpsr */
    ldp x0,     x1, [sp], #0x10
/* Restore fpcr and fpsr */
    msr FPCR, x1
    msr FPSR, x0
/* Pop pc and spsr */
    ldp x0,     x1, [sp], #0x10
/* Restore exception LR for PC and spsr */
    msr SPSR_EL1, x1
    msr ELR_EL1, x0
/* Pop q0-q31 */
    ldp q30,    q31,    [sp], #0x20
    ldp q28,    q29,    [sp], #0x20
    ldp q26,    q27,    [sp], #0x20
    ldp q24,    q25,    [sp], #0x20
    ldp q22,    q23,    [sp], #0x20
    ldp q20,    q21,    [sp], #0x20
    ldp q18,    q19,    [sp], #0x20
    ldp q16,    q17,    [sp], #0x20
    ldp q14,    q15,    [sp], #0x20
    ldp q12,    q13,    [sp], #0x20
    ldp q10,    q11,    [sp], #0x20
    ldp q8,     q9, [sp], #0x20
    ldp q6,     q7, [sp], #0x20
    ldp q4,     q5, [sp], #0x20
    ldp q2,     q3, [sp], #0x20
    ldp q0,     q1, [sp], #0x20
/* Pop x1-x21 */
    ldp x20,    x21,    [sp], #0x10
    ldp x18,    x19,    [sp], #0x10
    ldp x16,    x17,    [sp], #0x10
    ldp x14,    x15,    [sp], #0x10
    ldp x12,    x13,    [sp], #0x10
    ldp x10,    x11,    [sp], #0x10
    ldp x8,     x9, [sp], #0x10
    ldp x6,     x7, [sp], #0x10
    ldp x4,     x5, [sp], #0x10
    ldp x2,     x3, [sp], #0x10
    ldp lr,     x1, [sp], #0x10
/* Must clear reservations here to ensure consistency with atomic operations */
    clrex
.endm

_AArch64_Exception_interrupt_nest:

/* Execution template:
Save volatile regs on interrupt stack
Execute irq handler
Restore volatile regs from interrupt stack
Return to embedded exception vector code
*/
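
/*
 * This handler is reached from the SP0 vector, i.e. while already running
 * on the interrupt stack, so no stack switch is needed here.
 */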

/* Push interrupt context */
    push_interrupt_context

/* Jump into the handler, ignore return value */
    bl .AArch64_Interrupt_Handler

/*
 * SP should be where it was pre-handler (pointing at the exception frame)
 * or something has leaked stack space
 */
/* Pop interrupt context */
    pop_interrupt_context
/* Return to vector for final cleanup */
    ret

_AArch64_Exception_interrupt_no_nest:
/* Execution template:
Save volatile registers on the thread stack (some x, all q, ELR, etc.)
Switch to interrupt stack
Execute interrupt handler
Switch to thread stack
Call thread dispatch
Restore volatile registers from thread stack
Return to embedded exception vector code
*/

/* Push interrupt context */
    push_interrupt_context

/*
 * Switch to interrupt stack, interrupt dispatch may enable interrupts causing
 * nesting
 */
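/* SPSel = 0 selects SP_EL0, which is used as the interrupt stack */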
    msr spsel, #0

/* Jump into the handler */
    bl .AArch64_Interrupt_Handler

/*
 * Switch back to thread stack, interrupt dispatch should disable interrupts
 * before returning
 */
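/* SPSel = 1 selects SP_ELx, the thread stack pointer */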
    msr spsel, #1

/*
 * Check the combined result (thread dispatch necessary, ISR dispatch
 * disable, and thread dispatch disable level) returned by the handler in
 * x0; dispatch only if it is zero.
 */
    cmp x0, #0
    bne .Lno_need_thread_dispatch
    bl  _AArch64_Exception_thread_dispatch

.Lno_need_thread_dispatch:
/*
 * SP should be where it was pre-handler (pointing at the exception frame)
 * or something has leaked stack space
 */
/* Pop interrupt context */
    pop_interrupt_context
/* Return to vector for final cleanup */
    ret