/* SPDX-License-Identifier: BSD-2-Clause */

/*  context.S
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 2010. Gedare Bloom.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rtems/asm.h>


/*
 *  The assembler needs to be told that we know what to do with
 *  the global registers.
 */
.register %g2, #scratch
.register %g3, #scratch
.register %g6, #scratch
.register %g7, #scratch
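
/*
 *  (The V9 ABI treats %g2/%g3 as application registers and %g6/%g7 as
 *  system registers; declaring them as #scratch tells the assembler they
 *  are simply used as scratch registers here, so it does not reject
 *  their use below.)
 */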

#if (SPARC_HAS_FPU == 1)

/*
 *  void _CPU_Context_save_fp(
 *    void **fp_context_ptr
 *  )
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed, then the pointer is modified by this routine.
 *
 */
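
/*
 *  Illustrative sketch only -- the variable names below are hypothetical
 *  and not taken from this port's headers.  At the C level this routine
 *  (and its counterpart _CPU_Context_restore_fp) is used roughly as
 *
 *    void *fp_area = executing_thread->fp_context;
 *    _CPU_Context_save_fp( &fp_area );
 *
 *  i.e. the argument is the address of the pointer to the FP save area,
 *  which is why the code below first dereferences it (ldx [%i0], %l0).
 */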

  .align 4
PUBLIC(_CPU_Context_save_fp)
  SYM(_CPU_Context_save_fp):
    save    %sp, -SPARC64_MINIMUM_STACK_FRAME_SIZE, %sp

    /*
     *  The following enables the floating point unit.
     */

    sparc64_enable_FPU(%l0)

  /*
   *  Although sun4v supports alternate register names for double-
   *  and quad-word floating point, SPARC V9 only uses f[#].
   *
   *  Because quad-word fp is not supported by the hardware in
   *  many situations, we stick with double-word fp operations.
   */
  ldx     [%i0], %l0
  std     %f0, [%l0 + F0_OFFSET]
  std     %f2, [%l0 + F2_OFFSET]
  std     %f4, [%l0 + F4_OFFSET]
  std     %f6, [%l0 + F6_OFFSET]
  std     %f8, [%l0 + F8_OFFSET]
  std     %f10, [%l0 + F10_OFFSET]
  std     %f12, [%l0 + F12_OFFSET]
  std     %f14, [%l0 + F14_OFFSET]
  std     %f16, [%l0 + F16_OFFSET]
  std     %f18, [%l0 + F18_OFFSET]
  std     %f20, [%l0 + F20_OFFSET]
  std     %f22, [%l0 + F22_OFFSET]
  std     %f24, [%l0 + F24_OFFSET]
  std     %f26, [%l0 + F26_OFFSET]
  std     %f28, [%l0 + F28_OFFSET]
  std     %f30, [%l0 + F30_OFFSET]
  std     %f32, [%l0 + F32_OFFSET]
  std     %f34, [%l0 + F34_OFFSET]
  std     %f36, [%l0 + F36_OFFSET]
  std     %f38, [%l0 + F38_OFFSET]
  std     %f40, [%l0 + F40_OFFSET]
  std     %f42, [%l0 + F42_OFFSET]
  std     %f44, [%l0 + F44_OFFSET]
  std     %f46, [%l0 + F46_OFFSET]
  std     %f48, [%l0 + F48_OFFSET]
  std     %f50, [%l0 + F50_OFFSET]
  std     %f52, [%l0 + F52_OFFSET]
  std     %f54, [%l0 + F54_OFFSET]
  std     %f56, [%l0 + F56_OFFSET]
  std     %f58, [%l0 + F58_OFFSET]
  std     %f60, [%l0 + F60_OFFSET]
  std     %f62, [%l0 + F62_OFFSET]
  stx     %fsr, [%l0 + FSR_OFFSET]
  ret
  restore

  /*
   *  void _CPU_Context_restore_fp(
   *    void **fp_context_ptr
   *  )
   *
   *  This routine is responsible for restoring the FP context
   *  at *fp_context_ptr.  If the pointer to load the FP context
   *  from is changed, then the pointer is modified by this routine.
   *
   */

  .align 4
PUBLIC(_CPU_Context_restore_fp)
  SYM(_CPU_Context_restore_fp):
    save    %sp, -SPARC64_MINIMUM_STACK_FRAME_SIZE, %sp

    /*
     *  The following enables the floating point unit.
     */

    sparc64_enable_FPU(%l0)

  ldx     [%i0], %l0
  ldd     [%l0 + F0_OFFSET], %f0
  ldd     [%l0 + F2_OFFSET], %f2
  ldd     [%l0 + F4_OFFSET], %f4
  ldd     [%l0 + F6_OFFSET], %f6
  ldd     [%l0 + F8_OFFSET], %f8
  ldd     [%l0 + F10_OFFSET], %f10
  ldd     [%l0 + F12_OFFSET], %f12
  ldd     [%l0 + F14_OFFSET], %f14
  ldd     [%l0 + F16_OFFSET], %f16
  ldd     [%l0 + F18_OFFSET], %f18
  ldd     [%l0 + F20_OFFSET], %f20
  ldd     [%l0 + F22_OFFSET], %f22
  ldd     [%l0 + F24_OFFSET], %f24
  ldd     [%l0 + F26_OFFSET], %f26
  ldd     [%l0 + F28_OFFSET], %f28
  ldd     [%l0 + F30_OFFSET], %f30
  ldd     [%l0 + F32_OFFSET], %f32
  ldd     [%l0 + F34_OFFSET], %f34
  ldd     [%l0 + F36_OFFSET], %f36
  ldd     [%l0 + F38_OFFSET], %f38
  ldd     [%l0 + F40_OFFSET], %f40
  ldd     [%l0 + F42_OFFSET], %f42
  ldd     [%l0 + F44_OFFSET], %f44
  ldd     [%l0 + F46_OFFSET], %f46
  ldd     [%l0 + F48_OFFSET], %f48
  ldd     [%l0 + F50_OFFSET], %f50
  ldd     [%l0 + F52_OFFSET], %f52
  ldd     [%l0 + F54_OFFSET], %f54
  ldd     [%l0 + F56_OFFSET], %f56
  ldd     [%l0 + F58_OFFSET], %f58
  ldd     [%l0 + F60_OFFSET], %f60
  ldd     [%l0 + F62_OFFSET], %f62
  ldx     [%l0 + FSR_OFFSET], %fsr
  ret
  restore

#endif /* SPARC_HAS_FPU */

  /*
   *  void _CPU_Context_switch(
   *    Context_Control  *run,
   *    Context_Control  *heir
   *  )
   *
   *  This routine performs a normal non-FP context switch.
   */
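
  /*
   *  Illustrative sketch only -- generic RTEMS naming, not verified against
   *  this port's headers.  The thread dispatcher invokes this routine
   *  roughly as
   *
   *    _CPU_Context_switch( &executing->Registers, &heir->Registers );
   *
   *  so on entry %o0 points at the context area of the running thread
   *  (saved below) and %o1 at the context area of the heir (restored by
   *  _CPU_Context_restore_heir, into which this code falls through).
   */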

  .align 4
PUBLIC(_CPU_Context_switch)
  SYM(_CPU_Context_switch):
    ! skip g0
      stx     %g1, [%o0 + G1_OFFSET]       ! save the global registers
      stx     %g2, [%o0 + G2_OFFSET]
      stx     %g3, [%o0 + G3_OFFSET]
      stx     %g4, [%o0 + G4_OFFSET]
      stx     %g5, [%o0 + G5_OFFSET]
      stx     %g6, [%o0 + G6_OFFSET]
      stx     %g7, [%o0 + G7_OFFSET]

      ! load the address of the ISR stack nesting prevention flag
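      ! (setx is a synthetic instruction: it builds the full 64-bit address
      !  of the symbol in %g2, using %g1 as a temporary register)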
      setx  SYM(_CPU_ISR_Dispatch_disable), %g1, %g2
      lduw  [%g2], %g2

      ! save it a bit later so we do not waste a couple of cycles

      stx     %l0, [%o0 + L0_OFFSET]       ! save the local registers
      stx     %l1, [%o0 + L1_OFFSET]
      stx     %l2, [%o0 + L2_OFFSET]
      stx     %l3, [%o0 + L3_OFFSET]
      stx     %l4, [%o0 + L4_OFFSET]
      stx     %l5, [%o0 + L5_OFFSET]
      stx     %l6, [%o0 + L6_OFFSET]
      stx     %l7, [%o0 + L7_OFFSET]

      ! Now actually save ISR stack nesting prevention flag
      stuw    %g2, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]

      stx     %i0, [%o0 + I0_OFFSET]       ! save the input registers
      stx     %i1, [%o0 + I1_OFFSET]
      stx     %i2, [%o0 + I2_OFFSET]
      stx     %i3, [%o0 + I3_OFFSET]
      stx     %i4, [%o0 + I4_OFFSET]
      stx     %i5, [%o0 + I5_OFFSET]
      stx     %i6, [%o0 + I6_FP_OFFSET]
      stx     %i7, [%o0 + I7_OFFSET]

      stx     %o0, [%o0 + O0_OFFSET]       ! save the output registers
      stx     %o1, [%o0 + O1_OFFSET]
      stx     %o2, [%o0 + O2_OFFSET]
      stx     %o3, [%o0 + O3_OFFSET]
      stx     %o4, [%o0 + O4_OFFSET]
      stx     %o5, [%o0 + O5_OFFSET]
      stx     %o6, [%o0 + O6_SP_OFFSET]
      stx     %o7, [%o0 + O7_OFFSET]       ! o7 is the PC

!      rdpr    %pil, %o2
!      stuw    %o2, [%o0 + PIL_OFFSET] ! save pil

!      rdpr    %pstate, %o2
!      stx     %o2, [%o0 + PSTATE_OFFSET]      ! save status register

      /*
       *  This is entered from _CPU_Context_restore with:
       *    o1 = context to restore
!       *    o2 = pstate
       *
       *  NOTE: Flushing the register windows is necessary, but it adds
       *  an unpredictable (but bounded) overhead to context switching.
       */

PUBLIC(_CPU_Context_restore_heir)
  SYM(_CPU_Context_restore_heir):

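    ! flushw forces every register window other than the current one to be
    ! spilled to its save area on the stack, so that no stale windows from
    ! the running thread survive the switch to the heir's registers below.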
    flushw

    ! skip g0
    ldx     [%o1 + G1_OFFSET], %g1        ! restore the global registers
    ldx     [%o1 + G2_OFFSET], %g2
    ldx     [%o1 + G3_OFFSET], %g3
    ldx     [%o1 + G4_OFFSET], %g4
    ldx     [%o1 + G5_OFFSET], %g5
    ldx     [%o1 + G6_OFFSET], %g6
    ldx     [%o1 + G7_OFFSET], %g7

    ! Load thread specific ISR dispatch prevention flag
    ldx     [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
    setx  SYM(_CPU_ISR_Dispatch_disable), %o5, %o3
    ! Store it to memory later to use the cycles

    ldx     [%o1 + L0_OFFSET], %l0        ! restore the local registers
    ldx     [%o1 + L1_OFFSET], %l1
    ldx     [%o1 + L2_OFFSET], %l2
    ldx     [%o1 + L3_OFFSET], %l3
    ldx     [%o1 + L4_OFFSET], %l4
    ldx     [%o1 + L5_OFFSET], %l5
    ldx     [%o1 + L6_OFFSET], %l6
    ldx     [%o1 + L7_OFFSET], %l7

    ! Now restore thread specific ISR dispatch prevention flag
    stuw  %o2, [%o3]

    ldx     [%o1 + I0_OFFSET], %i0        ! restore the input registers
    ldx     [%o1 + I1_OFFSET], %i1
    ldx     [%o1 + I2_OFFSET], %i2
    ldx     [%o1 + I3_OFFSET], %i3
    ldx     [%o1 + I4_OFFSET], %i4
    ldx     [%o1 + I5_OFFSET], %i5
    ldx     [%o1 + I6_FP_OFFSET], %i6
    ldx     [%o1 + I7_OFFSET], %i7

    ldx     [%o1 + O0_OFFSET], %o0
    ldx     [%o1 + O2_OFFSET], %o2        ! restore the output registers
    ldx     [%o1 + O3_OFFSET], %o3
    ldx     [%o1 + O4_OFFSET], %o4
    ldx     [%o1 + O5_OFFSET], %o5
    ldx     [%o1 + O6_SP_OFFSET], %o6
    ldx     [%o1 + O7_OFFSET], %o7        ! PC

    ! on a hunch... we should be able to use some of the %o regs
!    lduw    [%o1 + PIL_OFFSET], %o2
!    wrpr    %g0, %o2, %pil

!    ldx     [%o1 + PSTATE_OFFSET], %o2

    ! do o1 last to avoid destroying heir context pointer
    ldx     [%o1 + O1_OFFSET], %o1        ! overwrite heir pointer
!    wrpr    %g0, %o2, %pstate

    retl
    nop

    /*
     *  void _CPU_Context_restore(
     *    Context_Control *new_context
     *  )
     *
     *  This routine is generally used only to perform restart self.
     *
     *  NOTE: It is unnecessary to reload some registers.
     */
    /* if _CPU_Context_restore_heir does not flushw, then do it here */
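
    /*
     *  Illustrative sketch only -- macro name per the generic RTEMS CPU
     *  port interface, not verified against this port's headers.  Restart
     *  self is typically wired up as
     *
     *    #define _CPU_Context_Restart_self( _the_context ) \
     *       _CPU_Context_restore( (_the_context) )
     *
     *  so the routine below simply branches into _CPU_Context_restore_heir
     *  with its argument as the heir context.
     */
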
  .align 4
PUBLIC(_CPU_Context_restore)
  SYM(_CPU_Context_restore):
    save    %sp, -SPARC64_MINIMUM_STACK_FRAME_SIZE, %sp
!    rdpr    %pstate, %o2
    ba      SYM(_CPU_Context_restore_heir)
    mov     %i0, %o1                      ! in the delay slot

/* end of file */