/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSBSPsX8664AMD64
 *
 * @brief Contains the _ISR_Handler that acts as the common handler for all
 * vectors to be managed by the RTEMS interrupt manager.
 */

/*
 * Copyright (C) 2024 Matheus Pecoraro
 * Copyright (c) 2018 Amaan Cheval <amaan.cheval@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <rtems/asm.h>
#include <rtems/score/cpu.h>
#include <rtems/score/percpu.h>

#ifndef CPU_STACK_ALIGNMENT
#error "Missing header? CPU_STACK_ALIGNMENT not defined"
#endif

BEGIN_CODE

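/*
 * A spurious APIC interrupt must not be acknowledged with an EOI, so the
 * handler does nothing and simply returns.
 */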
PUBLIC(apic_spurious_handler)
SYM(apic_spurious_handler):
  iretq

/*
 * These are callee-saved registers, which means we can use them in our
 * interrupts as persistent scratch registers (i.e. calls will not destroy
 * them), as long as we also save and restore them for the interrupted task.
 */
.set SCRATCH_REG0,    rbp
.set SCRATCH_REG1,    rbx

/*
 * We need to set a distinct handler for every interrupt vector so that
 * we can pass the vector number to _ISR_Handler correctly.
 */
#define DISTINCT_INTERRUPT_ENTRY(vector)   \
  .p2align 4                             ; \
  PUBLIC(rtems_irq_prologue_ ## vector)  ; \
SYM(rtems_irq_prologue_ ## vector):      ; \
  pushq REG_ARG0                         ; \
  movq  $vector, REG_ARG0                ; \
  pushq SCRATCH_REG0                     ; \
  pushq SCRATCH_REG1                     ; \
  jmp   SYM(_ISR_Handler)

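/*
 * REG_ARG0 is the first System V AMD64 argument register, so each prologue
 * leaves its vector number exactly where the C dispatcher expects its first
 * parameter.
 */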
DISTINCT_INTERRUPT_ENTRY(0)
DISTINCT_INTERRUPT_ENTRY(1)
DISTINCT_INTERRUPT_ENTRY(2)
DISTINCT_INTERRUPT_ENTRY(3)
DISTINCT_INTERRUPT_ENTRY(4)
DISTINCT_INTERRUPT_ENTRY(5)
DISTINCT_INTERRUPT_ENTRY(6)
DISTINCT_INTERRUPT_ENTRY(7)
DISTINCT_INTERRUPT_ENTRY(8)
DISTINCT_INTERRUPT_ENTRY(9)
DISTINCT_INTERRUPT_ENTRY(10)
DISTINCT_INTERRUPT_ENTRY(11)
DISTINCT_INTERRUPT_ENTRY(12)
DISTINCT_INTERRUPT_ENTRY(13)
DISTINCT_INTERRUPT_ENTRY(14)
DISTINCT_INTERRUPT_ENTRY(15)
DISTINCT_INTERRUPT_ENTRY(16)
DISTINCT_INTERRUPT_ENTRY(17)
DISTINCT_INTERRUPT_ENTRY(18)
DISTINCT_INTERRUPT_ENTRY(19)
DISTINCT_INTERRUPT_ENTRY(20)
DISTINCT_INTERRUPT_ENTRY(21)
DISTINCT_INTERRUPT_ENTRY(22)
DISTINCT_INTERRUPT_ENTRY(23)
DISTINCT_INTERRUPT_ENTRY(24)
DISTINCT_INTERRUPT_ENTRY(25)
DISTINCT_INTERRUPT_ENTRY(26)
DISTINCT_INTERRUPT_ENTRY(27)
DISTINCT_INTERRUPT_ENTRY(28)
DISTINCT_INTERRUPT_ENTRY(29)
DISTINCT_INTERRUPT_ENTRY(30)
DISTINCT_INTERRUPT_ENTRY(31)
DISTINCT_INTERRUPT_ENTRY(32)
DISTINCT_INTERRUPT_ENTRY(33)

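/*
 * On entry the stack holds, from the top down: SCRATCH_REG1, SCRATCH_REG0
 * and the interrupted task's REG_ARG0 (pushed by the prologue above),
 * followed by the interrupt frame (RIP, CS, RFLAGS, RSP, SS) pushed by
 * the CPU.
 */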
SYM(_ISR_Handler):
.save_cpu_interrupt_frame:
.set SAVED_RSP, SCRATCH_REG0
  movq rsp, SAVED_RSP

  /* Make space for CPU_Interrupt_frame */
  subq $CPU_INTERRUPT_FRAME_CALLER_SAVED_SIZE, rsp
.set ALIGNMENT_MASK, ~(CPU_STACK_ALIGNMENT - 1)
  andq $ALIGNMENT_MASK, rsp
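  /*
   * Aligning rsp here also satisfies fxsave64/fxrstor64 below, which
   * require a 16-byte aligned memory operand.
   */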
  /* XXX: Save interrupt mask? (See #5122) */

  /* Save x87 FPU, MMX and SSE state */
  fwait
  fxsave64 (CPU_INTERRUPT_FRAME_SSE_STATE)(rsp)
  /* Reset to a clean state */
  fninit
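  /*
   * 0x1F80 is the MXCSR reset default (all SSE exceptions masked,
   * round-to-nearest); ldmxcsr only accepts a memory operand, hence the
   * scratch slot below.
   */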
  /* Use CPU_INTERRUPT_FRAME_RAX as scratch space */
  movl $0x1F80, (CPU_INTERRUPT_FRAME_RAX)(rsp)
  ldmxcsr (CPU_INTERRUPT_FRAME_RAX)(rsp)

  /* Save caller-saved registers to CPU_Interrupt_frame */
  movq rax,         (CPU_INTERRUPT_FRAME_RAX)(rsp)
  movq rcx,         (CPU_INTERRUPT_FRAME_RCX)(rsp)
  movq rdx,         (CPU_INTERRUPT_FRAME_RDX)(rsp)
  movq rsi,         (CPU_INTERRUPT_FRAME_RSI)(rsp)
  movq r8,          (CPU_INTERRUPT_FRAME_R8)(rsp)
  movq r9,          (CPU_INTERRUPT_FRAME_R9)(rsp)
  movq r10,         (CPU_INTERRUPT_FRAME_R10)(rsp)
  movq r11,         (CPU_INTERRUPT_FRAME_R11)(rsp)

  /* Save the initial rsp */
  movq SAVED_RSP,   (CPU_INTERRUPT_FRAME_RSP)(rsp)
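  /*
   * The pre-alignment rsp cannot be recomputed from the aligned frame
   * address, so it is kept in the frame and reloaded at the very end.
   */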

.switch_stack_if_needed:
  /* Save current aligned rsp so we can find CPU_Interrupt_frame again later */
  movq rsp, SAVED_RSP

  /*
   * Switch to the interrupt stack if this is the outermost interrupt,
   * which means we have been using the task's stack so far
   */

.set Per_CPU_Info, SCRATCH_REG1
  GET_SELF_CPU_CONTROL_RBX /* SCRATCH_REG1 == rbx */
  cmpl $0, PER_CPU_ISR_NEST_LEVEL(Per_CPU_Info)
  jne  .skip_switch
.switch_stack:
  movq PER_CPU_INTERRUPT_STACK_HIGH(Per_CPU_Info), rsp
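  /* The stack grows downward, so the high address is its initial top */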
.skip_switch:
  incl PER_CPU_ISR_NEST_LEVEL(Per_CPU_Info)
  incl PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(Per_CPU_Info)
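  /*
   * The nest level and the thread dispatch disable level are raised
   * together so that no thread dispatch can occur while this interrupt
   * is being serviced.
   */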

.call_isr_dispatch:
  /* REG_ARG0 already holds the vector number, so we can simply call */
  call amd64_dispatch_isr

.restore_stack:
  /* If this is the outermost interrupt, this restores the task stack */
  movq SAVED_RSP, rsp

  decl PER_CPU_ISR_NEST_LEVEL(Per_CPU_Info)
  decl PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(Per_CPU_Info)
  movl PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(Per_CPU_Info), %eax
  orl PER_CPU_ISR_DISPATCH_DISABLE(Per_CPU_Info), %eax
  /*
   * If either the thread dispatch disable level or the ISR dispatch
   * disable flag is non-zero, skip scheduling a dispatch
   */
  cmpl $0, %eax
  jne  .restore_cpu_interrupt_frame
  /* Only call _Thread_Do_dispatch if a dispatch is needed */
  cmpb $0, PER_CPU_DISPATCH_NEEDED(Per_CPU_Info)
  je  .restore_cpu_interrupt_frame
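  /*
   * In C-like pseudocode (a sketch, not the literal implementation), the
   * two checks above amount to:
   *
   *   if ( cpu_self->thread_dispatch_disable_level == 0
   *     && cpu_self->isr_dispatch_disable == 0
   *     && cpu_self->dispatch_needed ) {
   *     goto schedule_dispatch;
   *   }
   */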

.schedule_dispatch:
  /* Set ISR dispatch disable and thread dispatch disable level to one */
  movl $1, PER_CPU_ISR_DISPATCH_DISABLE(Per_CPU_Info)
  movl $1, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(Per_CPU_Info)

  /* Call _Thread_Do_dispatch(); this function will enable interrupts */
  movq Per_CPU_Info, REG_ARG0
  movq $CPU_ISR_LEVEL_ENABLED, REG_ARG1 /* Set interrupt flag manually */
  call _Thread_Do_dispatch

  /* Disable interrupts */
  cli

  /*
   * It is possible that the Per_CPU_Info has changed after returning
   * from _Thread_Do_dispatch
   */
  GET_SELF_CPU_CONTROL_RBX /* Per_CPU_Info == SCRATCH_REG1 == rbx */
  cmpb $0, PER_CPU_DISPATCH_NEEDED(Per_CPU_Info)
  jne  .schedule_dispatch
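  /*
   * Interrupts were enabled while dispatching, so another interrupt may
   * have made a dispatch necessary again; loop until the flag stays clear.
   */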

  /* Done with thread dispatching */
  movl $0, PER_CPU_ISR_DISPATCH_DISABLE(Per_CPU_Info)

.restore_cpu_interrupt_frame:
  /* Restore x87 FPU, MMX and SSE state */
  fwait
  fxrstor64 (CPU_INTERRUPT_FRAME_SSE_STATE)(rsp)

  /* Restore registers from CPU_Interrupt_frame */
  movq (CPU_INTERRUPT_FRAME_RAX)(rsp), rax
  movq (CPU_INTERRUPT_FRAME_RCX)(rsp), rcx
  movq (CPU_INTERRUPT_FRAME_RDX)(rsp), rdx
  movq (CPU_INTERRUPT_FRAME_RSI)(rsp), rsi
  movq (CPU_INTERRUPT_FRAME_R8)(rsp), r8
  movq (CPU_INTERRUPT_FRAME_R9)(rsp), r9
  movq (CPU_INTERRUPT_FRAME_R10)(rsp), r10
  movq (CPU_INTERRUPT_FRAME_R11)(rsp), r11

  /* Restore the rsp value from just before _ISR_Handler was called */
  movq (CPU_INTERRUPT_FRAME_RSP)(rsp), SAVED_RSP
  movq SAVED_RSP, rsp

  /* Restore the registers DISTINCT_INTERRUPT_ENTRY pushed to the task stack */
  popq SCRATCH_REG1
  popq SCRATCH_REG0
  popq REG_ARG0
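  /* iretq pops the RIP, CS, RFLAGS, RSP and SS pushed by the CPU on entry */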
  iretq

END_CODE

END