/* SPDX-License-Identifier: BSD-2-Clause */

/*
 * Copyright (C) 2024 Matheus Pecoraro
 * Copyright (c) 2018 Amaan Cheval <amaan.cheval@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>
#include <rtems/score/cpu.h>

#ifndef CPU_STACK_ALIGNMENT
#error "Missing header? CPU_STACK_ALIGNMENT not defined"
#endif

BEGIN_CODE

/*
 *  void _CPU_Context_switch( run_context, heir_context )
 *
 *  This routine performs a normal non-FP context switch.
 */
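
/*
 * The non-FP context saved and restored here consists of the callee-saved
 * registers (rbx, rbp, r12-r15), the stack pointer, rflags, the per-CPU ISR
 * dispatch disable level, and the FS segment base; caller-saved registers
 * need not be preserved across the call.
 */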

.p2align  1
PUBLIC(_CPU_Context_switch)
PUBLIC(_CPU_Context_switch_no_return)

/* save context argument */
.set RUNCONTEXT_ARG,   REG_ARG0
/* restore context argument */
.set HEIRCONTEXT_ARG,  REG_ARG1

SYM(_CPU_Context_switch):
SYM(_CPU_Context_switch_no_return):
  movq    RUNCONTEXT_ARG, r10   /* r10 = running thread's context */
  GET_SELF_CPU_CONTROL_R11      /* r11 = per-CPU information */

  /* Fill up the Context_Control struct */
  pushf
  popq               CPU_CONTEXT_CONTROL_EFLAGS(r10) /* pop rflags into context */
  movq    rbx,       CPU_CONTEXT_CONTROL_RBX(r10)
  movq    rsp,       CPU_CONTEXT_CONTROL_RSP(r10)
  movq    rbp,       CPU_CONTEXT_CONTROL_RBP(r10)
  movq    r12,       CPU_CONTEXT_CONTROL_R12(r10)
  movq    r13,       CPU_CONTEXT_CONTROL_R13(r10)
  movq    r14,       CPU_CONTEXT_CONTROL_R14(r10)
  movq    r15,       CPU_CONTEXT_CONTROL_R15(r10)

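  /*
   * Save the per-CPU ISR dispatch disable level in the context so it can be
   * restored when this thread is switched back in.
   */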
  movl    PER_CPU_ISR_DISPATCH_DISABLE(r11), %edx
  movl    %edx, CPU_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE(r10)

  movq    r10, r8              /* r8  = running thread's context */
  movq    HEIRCONTEXT_ARG, r10 /* r10 = heir thread's context */

#ifdef RTEMS_SMP
  /*
   * The executing thread no longer executes on this processor.  Switch
   * the stack to the temporary interrupt stack of this processor.  Mark
   * the context of the executing thread as not executing.
   */
  leaq    PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE(r11), rsp
  movw    $0, CPU_CONTEXT_CONTROL_IS_EXECUTING(r8)

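  /*
   * Try to claim the heir context with an atomic test-and-set of its
   * is-executing indicator.  If the bit was clear, the heir no longer runs
   * on another processor and its context can be restored; otherwise check
   * whether a new heir has been selected in the meantime.
   */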
.check_is_executing:
  lock btsw $0, CPU_CONTEXT_CONTROL_IS_EXECUTING(r10) /* Indicator in carry flag */
  jnc       .restore

.get_potential_new_heir:
  /* We may have a new heir */

  /* Read the executing and heir */
  movq    PER_CPU_OFFSET_EXECUTING(r11), r8
  movq    PER_CPU_OFFSET_HEIR(r11), r9

  /*
   * Update the executing only if necessary to avoid cache line
   * monopolization.
   */
  cmpq    r8, r9
  je      .check_is_executing

  /* Calculate the heir context pointer */
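  /*
   * The context lives at the same offset in every thread control block, so
   * r10 + (r9 - r8) yields the new heir's context.
   */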
  addq    r9, r10
  subq    r8, r10

  /* Update the executing */
  movq    r9, PER_CPU_OFFSET_EXECUTING(r11)

  jmp     .check_is_executing
#endif

.restore:
  movl  CPU_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE(r10), %edx
  movl  %edx, PER_CPU_ISR_DISPATCH_DISABLE(r11)

  movq  CPU_CONTEXT_CONTROL_RBX(r10), rbx
  movq  CPU_CONTEXT_CONTROL_RSP(r10), rsp

  /*
   * Rflags must be loaded after rsp to avoid taking an interrupt while the
   * ISR stack is still in use during system initialization.
   */
  pushq CPU_CONTEXT_CONTROL_EFLAGS(r10)       /* push rflags */
  popf                                        /* restore rflags */

  movq  CPU_CONTEXT_CONTROL_RBP(r10), rbp
  movq  CPU_CONTEXT_CONTROL_R12(r10), r12
  movq  CPU_CONTEXT_CONTROL_R13(r10), r13
  movq  CPU_CONTEXT_CONTROL_R14(r10), r14
  movq  CPU_CONTEXT_CONTROL_R15(r10), r15

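  /*
   * Restore the heir's FS segment base (used, for example, for thread-local
   * storage).  wrmsr expects the MSR index in %ecx and the 64-bit value
   * split across %edx (high half) and %eax (low half).
   */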
  movq CPU_CONTEXT_CONTROL_FS(r10), rax
  /* High bits in %edx and low bits in %eax */
  movq rax, rdx
  shrq $32, rdx
  movl $FSBASE_MSR, %ecx
  wrmsr

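  /*
   * Return on the heir's stack; ret pops the address at which the heir
   * resumes execution.
   */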
  ret

/*
 *  void _CPU_Context_restore( new_context )
 *
 *  This routine performs a normal non-FP context restore.
 */

PUBLIC(_CPU_Context_restore)

.set NEWCONTEXT_ARG,   REG_ARG0       /* context to restore argument */

SYM(_CPU_Context_restore):
  movq      NEWCONTEXT_ARG, r10  /* r10 = context to restore */
  GET_SELF_CPU_CONTROL_R11       /* r11 = per-CPU information */
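  /*
   * Reuse the restore path of _CPU_Context_switch; this routine does not
   * return to its caller.
   */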
  jmp       .restore

END_CODE
END