/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUARM
 *
 * @brief ARM interrupt exception prologue and epilogue.
 */

/*
 * Copyright (C) 2009, 2022 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The upper EXCHANGE_SIZE bytes of the INT stack area are used for data
 * exchange between INT and SVC mode.  Below this is the actual INT stack.
 * The exchange area is only accessed if INT is disabled.
 */
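
/*
 * As an illustration, after the "stmdb sp, EXCHANGE_LIST" in the prologue
 * below, the exchange area looks like this (STMDB stores the lowest
 * register number at the lowest address):
 *
 *   SP_irq - 16:  EXCHANGE_LR      <- EXCHANGE_INT_SP points here
 *   SP_irq - 12:  EXCHANGE_SPSR
 *   SP_irq -  8:  EXCHANGE_CPSR
 *   SP_irq -  4:  EXCHANGE_INT_SP
 *   SP_irq:       top of the INT stack area
 */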

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

#ifdef ARM_MULTILIB_ARCH_V4

#define STACK_POINTER_ADJUST r7
#define NON_VOLATILE_SCRATCH r9

#ifndef ARM_MULTILIB_HAS_STORE_RETURN_STATE

#define EXCHANGE_LR r4
#define EXCHANGE_SPSR r5
#define EXCHANGE_CPSR r6
#define EXCHANGE_INT_SP r8

#define EXCHANGE_LIST {EXCHANGE_LR, EXCHANGE_SPSR, EXCHANGE_CPSR, EXCHANGE_INT_SP}
#define EXCHANGE_SIZE 16

#define CONTEXT_LIST {r0, r1, r2, r3, EXCHANGE_LR, EXCHANGE_SPSR, NON_VOLATILE_SCRATCH, r12}
#define CONTEXT_SIZE 32

#endif /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */

.arm
.globl _ARMV4_Exception_interrupt
_ARMV4_Exception_interrupt:

#ifdef ARM_MULTILIB_HAS_STORE_RETURN_STATE
    /* Prepare return from interrupt */
    sub lr, lr, #4

    /* Save LR_irq and SPSR_irq to the SVC stack */
    srsfd   sp!, #ARM_PSR_M_SVC

    /* Switch to SVC mode */
    cps #ARM_PSR_M_SVC

    /*
     * Save the volatile registers, two non-volatile registers used for
     * interrupt processing, and the link register.
     */
    push    {r0-r3, STACK_POINTER_ADJUST, NON_VOLATILE_SCRATCH, r12, lr}
#else /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */
    /* Save exchange registers to exchange area */
    stmdb   sp, EXCHANGE_LIST

    /* Set exchange registers */
    mov EXCHANGE_LR, lr
    mrs EXCHANGE_SPSR, SPSR
    mrs EXCHANGE_CPSR, CPSR
    sub EXCHANGE_INT_SP, sp, #EXCHANGE_SIZE

    /* Switch to SVC mode */
    orr EXCHANGE_CPSR, EXCHANGE_CPSR, #0x1
    msr CPSR_c, EXCHANGE_CPSR

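    /*
     * The mode switch above works because ARM_PSR_M_IRQ is 0x12 and
     * ARM_PSR_M_SVC is 0x13: setting bit 0 of the CPSR mode field is
     * enough to move from IRQ to SVC mode.
     */
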
    /*
     * Save context.  We save the link register separately because it has
     * to be restored in SVC mode.  The other registers can be restored in
     * INT mode.  Ensure that the size of the saved registers is an
     * integral multiple of 8 bytes.  Provide a non-volatile scratch
     * register which may be used across function calls.
     */
    push    CONTEXT_LIST
    push    {STACK_POINTER_ADJUST, lr}
#endif /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */

    /*
     * On a public interface, the stack pointer must be aligned on an
     * 8-byte boundary.  However, it may temporarily be only aligned on a
     * 4-byte boundary.  Make sure the stack pointer is aligned on an
     * 8-byte boundary.
     */
    and STACK_POINTER_ADJUST, sp, #0x4
    sub sp, sp, STACK_POINTER_ADJUST
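
    /*
     * For example, a stack pointer ending in 0x4 yields an adjustment of
     * four, so the subtraction drops it to the next 8-byte boundary; an
     * already 8-byte aligned stack pointer yields an adjustment of zero.
     */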

    /* Get per-CPU control of current processor */
    GET_SELF_CPU_CONTROL    r0

#ifdef ARM_MULTILIB_VFP
    /* Save VFP context */
    vmrs    r2, FPSCR
    vpush   {d0-d7}
#ifdef ARM_MULTILIB_VFP_D32
    vpush   {d16-d31}
#endif
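    /*
     * Note: r3 carries no VFP state here; it is presumably pushed along
     * with the FPSCR value in r2 only to keep the stack 8-byte aligned.
     */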
    push    {r2, r3}
#endif /* ARM_MULTILIB_VFP */

#ifndef ARM_MULTILIB_HAS_STORE_RETURN_STATE
    /* Remember INT stack pointer */
    mov r1, EXCHANGE_INT_SP

    /* Restore exchange registers from exchange area */
    ldmia   r1, EXCHANGE_LIST
#endif /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */

    /* Get interrupt nest level */
    ldr r2, [r0, #PER_CPU_ISR_NEST_LEVEL]

    /* Switch stack if necessary and save original stack pointer */
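    /*
     * Only the outermost interrupt (nest level zero) switches to the
     * dedicated interrupt stack; nested interrupts already run on it.
     * The original stack pointer survives in NON_VOLATILE_SCRATCH.
     */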
    mov NON_VOLATILE_SCRATCH, sp
    cmp r2, #0
#ifdef ARM_MULTILIB_HAS_STORE_RETURN_STATE
    ldreq   sp, [r0, #PER_CPU_INTERRUPT_STACK_HIGH]
#else
    moveq   sp, r1
#endif

    /* Increment interrupt nest and thread dispatch disable level */
    ldr r3, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
    add r2, r2, #1
    add r3, r3, #1
    str r2, [r0, #PER_CPU_ISR_NEST_LEVEL]
    str r3, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

    /* Call BSP dependent interrupt dispatcher */
#ifdef RTEMS_PROFILING
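    /*
     * Profile only the outermost interrupt: read the CPU counter before
     * and after bsp_interrupt_dispatch() and report both instants to
     * _Profiling_Outer_most_interrupt_entry_and_exit().  The extra r1 and
     * r3 in the push and pop below keep the stack 8-byte aligned.
     */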
    cmp r2, #1
    bne .Lskip_profiling
    BLX_TO_THUMB_1  _CPU_Counter_read
    push    {r0, r1}
    GET_SELF_CPU_CONTROL    r0
    BLX_TO_THUMB_1  bsp_interrupt_dispatch
    BLX_TO_THUMB_1  _CPU_Counter_read
    pop {r1, r3}
    mov r2, r0
    GET_SELF_CPU_CONTROL    r0
    BLX_TO_THUMB_1  _Profiling_Outer_most_interrupt_entry_and_exit
.Lprofiling_done:
#else
    BLX_TO_THUMB_1  bsp_interrupt_dispatch
#endif

    /* Get per-CPU control of current processor */
    GET_SELF_CPU_CONTROL    r0

    /* Load some per-CPU variables */
    ldr r12, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
    ldrb    r1, [r0, #PER_CPU_DISPATCH_NEEDED]
    ldr r2, [r0, #PER_CPU_ISR_DISPATCH_DISABLE]
    ldr r3, [r0, #PER_CPU_ISR_NEST_LEVEL]

    /* Restore stack pointer */
    mov sp, NON_VOLATILE_SCRATCH

    /* Decrement levels and determine thread dispatch state */
    eor r1, r1, r12
    sub r12, r12, #1
    orr r1, r1, r12
    orr r1, r1, r2
    sub r3, r3, #1
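
    /*
     * After the sequence above r1 is
     *
     *   (dispatch needed ^ disable level) | (disable level - 1)
     *     | ISR dispatch disable
     *
     * which is zero exactly if the thread dispatch disable level was one
     * (now zero), a thread dispatch is needed, and the ISR dispatch
     * disable flag is clear.
     */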

    /* Store thread dispatch disable and ISR nest levels */
    str r12, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
    str r3, [r0, #PER_CPU_ISR_NEST_LEVEL]

    /*
     * Check whether a thread dispatch is necessary, taking the ISR
     * dispatch disable flag and the thread dispatch disable level into
     * account.
     */
    cmp r1, #0
    bne .Lthread_dispatch_done

    /* Save CPSR in non-volatile register */
    mrs NON_VOLATILE_SCRATCH, CPSR

    /* Thread dispatch */

.Ldo_thread_dispatch:

    /* Set ISR dispatch disable and thread dispatch disable level to one */
    mov r12, #1
    str r12, [r0, #PER_CPU_ISR_DISPATCH_DISABLE]
    str r12, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

    /* Call _Thread_Do_dispatch(), this function will enable interrupts */
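    /*
     * Bit 0x80 in the CPSR is the I flag.  The second argument in r1 is
     * therefore the saved CPSR with IRQs enabled, which lets
     * _Thread_Do_dispatch() run with interrupts on.
     */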
    bic r1, NON_VOLATILE_SCRATCH, #0x80
    BLX_TO_THUMB_1  _Thread_Do_dispatch

    /* Disable interrupts */
    msr CPSR, NON_VOLATILE_SCRATCH

    /*
     * Get per-CPU control of current processor.  In SMP configurations, we
     * may run on another processor after the _Thread_Do_dispatch() call.
     */
    GET_SELF_CPU_CONTROL    r0

    /* Check if we have to do the thread dispatch again */
    ldrb    r12, [r0, #PER_CPU_DISPATCH_NEEDED]
    cmp r12, #0
    bne .Ldo_thread_dispatch

    /* We are done with thread dispatching */
    mov r12, #0
    str r12, [r0, #PER_CPU_ISR_DISPATCH_DISABLE]

.Lthread_dispatch_done:

#ifdef ARM_MULTILIB_VFP
    /* Restore VFP context */
    pop {r2, r3}
#ifdef ARM_MULTILIB_VFP_D32
    vpop    {d16-d31}
#endif
    vpop    {d0-d7}
    vmsr    FPSCR, r2
#endif /* ARM_MULTILIB_VFP */

    /* Undo stack pointer adjustment */
    add sp, sp, STACK_POINTER_ADJUST

#ifdef ARM_MULTILIB_HAS_STORE_RETURN_STATE
    /*
     * Restore the volatile registers, two non-volatile registers used for
     * interrupt processing, and the link register.
     */
    pop {r0-r3, STACK_POINTER_ADJUST, NON_VOLATILE_SCRATCH, r12, lr}
#else /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */
    /* Restore STACK_POINTER_ADJUST register and link register */
    pop {STACK_POINTER_ADJUST, lr}

    /*
     * XXX: Remember and restore the stack pointer.  The data on the stack
     * is still in use, so the stack is now in an inconsistent state.  The
     * FIQ handler implementation must not use this area.
     */
    mov r12, sp
    add sp, #CONTEXT_SIZE

    /* Get INT mode program status register */
    mrs r1, CPSR
    bic r1, r1, #0x1

    /* Switch to INT mode */
    msr CPSR_c, r1

    /* Save EXCHANGE_LR and EXCHANGE_SPSR registers to exchange area */
    push    {EXCHANGE_LR, EXCHANGE_SPSR}

    /* Restore context */
    ldmia   r12, CONTEXT_LIST

    /* Set return address and program status */
    mov lr, EXCHANGE_LR
    msr SPSR_fsxc, EXCHANGE_SPSR

    /* Restore EXCHANGE_LR and EXCHANGE_SPSR registers from exchange area */
    pop {EXCHANGE_LR, EXCHANGE_SPSR}
#endif /* ARM_MULTILIB_HAS_STORE_RETURN_STATE */

#ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
    /*
     * We must clear reservations here, since otherwise compare-and-swap
     * atomic operations with interrupts enabled may yield wrong results.
     * A compare-and-swap atomic operation is generated by the compiler
     * like this:
     *
     *   .L1:
     *     ldrex r1, [r0]
     *     cmp   r1, r3
     *     bne   .L2
     *     strex r3, r2, [r0]
     *     cmp   r3, #0
     *     bne   .L1
     *   .L2:
     *
     * Consider the following scenario.  A thread is interrupted right
     * before the strex.  The interrupt updates the value using a
     * compare-and-swap sequence.  Everything is fine up to this point.
     * The interrupt now performs a second compare-and-swap sequence which
     * fails with a branch to .L2.  The current processor now has a
     * reservation.  The interrupt returns without a further strex.  The
     * thread then updates the value using the unrelated reservation of
     * the interrupt.
     */
    clrex
#endif

    /* Return from interrupt */
#ifdef ARM_MULTILIB_HAS_STORE_RETURN_STATE
    rfefd   sp!
#else
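    /*
     * In this path the return address was not adjusted in the prologue,
     * so subtract the IRQ return offset here; writing to the PC with the
     * S suffix also restores the CPSR from the SPSR.
     */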
    subs    pc, lr, #4
#endif

#ifdef RTEMS_PROFILING
.Lskip_profiling:
    BLX_TO_THUMB_1  bsp_interrupt_dispatch
    b   .Lprofiling_done
#endif

#endif /* ARM_MULTILIB_ARCH_V4 */