Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:24:26

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /*  cpu_asm.s
0004  *
0005  *  This file contains the basic algorithms for all assembly code used
0006  *  in a specific CPU port of RTEMS.  These algorithms must be implemented
0007  *  in assembly language. 
0008  *
0009  *  COPYRIGHT (c) 1989-2007. On-Line Applications Research Corporation (OAR).
0010  *  COPYRIGHT (c) 2010. Gedare Bloom.
0011  *
0012  * Redistribution and use in source and binary forms, with or without
0013  * modification, are permitted provided that the following conditions
0014  * are met:
0015  * 1. Redistributions of source code must retain the above copyright
0016  *    notice, this list of conditions and the following disclaimer.
0017  * 2. Redistributions in binary form must reproduce the above copyright
0018  *    notice, this list of conditions and the following disclaimer in the
0019  *    documentation and/or other materials provided with the distribution.
0020  *
0021  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0022  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0023  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0024  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0025  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0026  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0027  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0028  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0029  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0030  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0031  * POSSIBILITY OF SUCH DAMAGE.
0032  */
0033 
0034 #include <rtems/asm.h>
0035 #include <rtems/score/percpu.h>
0036 
0037 
0038 /*
0039  *  The SPARC V9 ABI reserves %g2/%g3 (application) and %g6/%g7 (system
0040  *  use); declare them as scratch so the assembler accepts this code's
0041  *  deliberate use of them without diagnostics. */
0042 .register %g2, #scratch
0043 .register %g3, #scratch
0044 .register %g6, #scratch
0045 .register %g7, #scratch
0046 
0047 
0048     /*
0049      *  void _ISR_Handler()
0050      *
0051      *  This routine provides the RTEMS interrupt management.
0052      *
0053      *  We enter this handler from the 8 instructions in the trap table with
0054      *  the following registers assumed to be set as shown:
0055      *
0056      *    g4 = tstate (old l0)
0057      *    g2 = trap type (vector) (old l3)
0058      *
0059      *  NOTE: By an executive defined convention:
0060      *    if trap type is between 0 and 511 it is an asynchronous trap
0061      *    if trap type is between 512 and 1023 it is a synchronous trap
0062      */
0063 
0064   .align 4
0065 PUBLIC(_ISR_Handler)
0066   SYM(_ISR_Handler):
0067 
0068     /* 
0069      * The ISR is called at TL = 1. 
0070      * On sun4u we use the alternate globals set.     
0071      *
0072      * On entry:
0073      *   g4 = tstate (from trap table)
0074      *   g2 = trap vector #
0075      * 
0076      * In either case, note that trap handlers share a register window with 
0077      * the interrupted context, unless we explicitly enter a new window. This 
0078      * differs from Sparc v8, in which a dedicated register window is saved 
0079      * for trap handling.  This means we have to avoid overwriting any registers
0080      * that we don't save.
0081      *
0082      */
0083 
0084 
0085     /*
0086      *  save some or all context on stack
0087      */
0088 
0089     /*
0090      *  Save the state of the interrupted task -- especially the global
0091      *  registers -- in the Interrupt Stack Frame.  Note that the ISF
0092      *  includes a regular minimum stack frame which will be used if
0093      *  needed by register window overflow and underflow handlers.
0094      *
0095      *  This is slightly wasteful, since the stack already has the window
0096      *  overflow space reserved, but there is no obvious way to ensure 
0097      *  we can store the interrupted state and still handle window 
0098      *  spill/fill correctly, since there is no room for the ISF.
0099      *
0100      */
0101 
0102     /* Debug sanity check: this handler assumes it was entered at TL = 1;
0103      * a nested trap level would violate the register assumptions below. */
0104     rdpr %tl, %g1
0105     cmp %g1, 1
0106     be 1f
0107     nop
0108 
0109     0: ba 0b               ! TL != 1: hang here (intentional) so the state can be inspected
0110     nop
0111 
0112     1:
0113     /* first store the sp of the interrupted task temporarily in g1 */
0114     mov   %sp, %g1
0115 
0116     sub     %sp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
0117     ! make space for Stack_Frame||ISF
0118 
0119     /* save tstate, tpc, tnpc, pil */
0120     stx   %g4, [%sp + STACK_BIAS + ISF_TSTATE_OFFSET]
0121     rdpr  %pil, %g3
0122     rdpr  %tpc, %g4
0123     rdpr  %tnpc, %g5
0124     stx   %g3, [%sp + STACK_BIAS + ISF_PIL_OFFSET]
0125     stx   %g4, [%sp + STACK_BIAS + ISF_TPC_OFFSET]
0126     stx   %g5, [%sp + STACK_BIAS + ISF_TNPC_OFFSET]
0127     stx   %g2, [%sp + STACK_BIAS + ISF_TVEC_OFFSET]
0128 
0129     rd  %y, %g4        ! save y
0130     stx   %g4, [%sp + STACK_BIAS + ISF_Y_OFFSET]
0131 
0132     ! save interrupted frame's output regs
0133     stx     %o0, [%sp + STACK_BIAS + ISF_O0_OFFSET]     ! save o0
0134     stx     %o1, [%sp + STACK_BIAS + ISF_O1_OFFSET]     ! save o1
0135     stx     %o2, [%sp + STACK_BIAS + ISF_O2_OFFSET]     ! save o2
0136     stx     %o3, [%sp + STACK_BIAS + ISF_O3_OFFSET]     ! save o3
0137     stx     %o4, [%sp + STACK_BIAS + ISF_O4_OFFSET]     ! save o4
0138     stx     %o5, [%sp + STACK_BIAS + ISF_O5_OFFSET]     ! save o5
0139     stx     %g1, [%sp + STACK_BIAS + ISF_O6_SP_OFFSET]  ! save o6/sp (pre-ISF value held in g1)
0140     stx     %o7, [%sp + STACK_BIAS + ISF_O7_OFFSET]     ! save o7
0141 
0142     mov  %g1, %o5    ! hold the old sp here for now
0143     mov  %g2, %o1    ! we'll need trap # later
0144 
0145     /* switch to TL[0] -- safe now that tstate/tpc/tnpc are saved above */
0146     wrpr  %g0, 0, %tl
0147 
0148     /* switch to normal globals */
0149 #if defined (SUN4U)
0150     /* the assignment to pstate below will mask out the AG bit */
0151 #elif defined (SUN4V)
0152     wrpr  %g0, 0, %gl
0153 #endif
0154     /* get pstate to known state */
0155     wrpr  %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK, %pstate
0156 
0157     ! save globals (the interrupted context's normal global set)
0158     stx     %g1, [%sp + STACK_BIAS + ISF_G1_OFFSET]     ! save g1
0159     stx     %g2, [%sp + STACK_BIAS + ISF_G2_OFFSET]     ! save g2
0160     stx     %g3, [%sp + STACK_BIAS + ISF_G3_OFFSET]     ! save g3
0161     stx     %g4, [%sp + STACK_BIAS + ISF_G4_OFFSET]     ! save g4
0162     stx     %g5, [%sp + STACK_BIAS + ISF_G5_OFFSET]     ! save g5
0163     stx     %g6, [%sp + STACK_BIAS + ISF_G6_OFFSET]     ! save g6
0164     stx     %g7, [%sp + STACK_BIAS + ISF_G7_OFFSET]     ! save g7
0165 
0166 
0167   mov  %o1, %g2  ! get the trap #
0168   mov  %o5, %g7  ! store the interrupted %sp (preserve)
0169   mov  %sp, %o1  ! 2nd arg to ISR Handler = address of ISF
0170   add  %o1, STACK_BIAS, %o1 ! need to adjust for stack bias, 2nd arg = ISF
0171 
0172   /*
0173    *  Increment ISR nest level and Thread dispatch disable level.
0174    *
0175    *  Register usage for this section: (note, these are used later)
0176    *
0177    *    g3 = _Thread_Dispatch_disable_level pointer
0178    *    g5 = _Thread_Dispatch_disable_level value (uint32_t)
0179    *    g6 = _ISR_Nest_level pointer
0180    *    g4 = _ISR_Nest_level value (uint32_t)
0181    *    o5 = temp
0182    *
0183    *  NOTE: It is assumed that g6 - g7 will be preserved until the ISR
0184    *        nest and thread dispatch disable levels are unnested.
0185    */
0186 
0187   setx  THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3   ! %o5 = setx scratch
0188   lduw  [%g3], %g5
0189   setx  ISR_NEST_LEVEL, %o5, %g6
0190   lduw  [%g6], %g4
0191 
0192   add      %g5, 1, %g5
0193   stuw     %g5, [%g3]
0194 
0195   add      %g4, 1, %g4
0196   stuw     %g4, [%g6]
0197 
0198   /*
0199    *  If ISR nest level was zero (now 1), then switch stack.
0200    */
0201 
0202   subcc    %g4, 1, %g4             ! outermost interrupt handler?
0203   bnz      dont_switch_stacks      ! No, then do not switch stacks (delay slot executes the first insn of the setx below -- assumed safe since %g1/%o5 are scratch on both paths; confirm)
0204 
0205   setx  SYM(INTERRUPT_STACK_HIGH), %o5, %g1
0206   ldx  [%g1], %sp
0207 
0208   /*
0209    * Adjust the stack for the stack bias
0210    */
0211   sub     %sp, STACK_BIAS, %sp
0212 
0213   /*
0214    *  Make sure we have a place on the stack for the window overflow
0215    *  trap handler to write into.  At this point it is safe to
0216    *  enable traps again.
0217    */
0218 
0219   sub      %sp, SPARC64_MINIMUM_STACK_FRAME_SIZE, %sp
0220 
0221   dont_switch_stacks:
0222   /*
0223    *  Check if we have an external interrupt (trap 0x41 - 0x4f). If so,
0224    *  set the PIL to mask off interrupts with lower priority.
0225    *
0226    *  The original PIL is not modified since it will be restored
0227    *  when the interrupt handler returns.
0228    */
0229 
0230   and      %g2, 0x0ff, %g1 ! is bottom byte of vector number [0x41,0x4f]?
0231 
0232   subcc    %g1, 0x41, %g0
0233   bl       dont_fix_pil    ! below 0x41: not an external interrupt (delay-slot subcc only sets flags)
0234   subcc    %g1, 0x4f, %g0
0235   bg       dont_fix_pil    ! above 0x4f: not an external interrupt
0236   nop
0237   wrpr     %g0, %g1, %pil  ! NOTE(review): %g1 is 0x41..0x4f here; this relies on wrpr keeping only the 4-bit PIL field (0x4n -> n) -- confirm
0238 
0239   dont_fix_pil:
0240   /* We need to be careful about enabling traps here.
0241    *
0242    * We already stored off the tstate, tpc, and tnpc, and switched to
0243    * TL = 0, so it should be safe.
0244    */
0245 
0246   /* zero out g4 so that ofw calls work */
0247   mov  %g0, %g4
0248 
0249   ! **** ENABLE TRAPS ****
0250   wrpr  %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
0251     SPARC_PSTATE_IE_MASK, %pstate 
0252 
0253     /*
0254      *  Vector to user's handler.
0255      *
0256      *  NOTE: TBR may no longer have vector number in it since
0257      *        we just enabled traps.  It is definitely in g2.
0258      */
0259     setx  SYM(_ISR_Vector_table), %o5, %g1
0260     and      %g2, 0x1FF, %o5        ! remove synchronous trap indicator
0261     sll      %o5, 3, %o5            ! o5 = offset into table (8-byte entries)
0262     ldx      [%g1 + %o5], %g1       ! g1 = _ISR_Vector_table[ vector ]
0263 
0264 
0265     ! o1 = 2nd arg = address of the ISF
0266     !   WAS LOADED WHEN ISF WAS SAVED!!!
0267     mov      %g2, %o0               ! o0 = 1st arg = vector number
0268     call     %g1, 0
0269     nop                             ! delay slot
0270 
0271     /*
0272      *  Redisable traps so we can finish up the interrupt processing.
0273      *  This is a conservative place to do this.
0274      */
0275     ! **** DISABLE TRAPS ****
0276     wrpr  %g0, SPARC_PSTATE_PRIV_MASK, %pstate
0277 
0278     /* 
0279      * We may safely use any of the %o and %g registers, because 
0280      * we saved them earlier (and any other interrupt that uses 
0281      * them will also save them).  Right now, the state of those
0282      * registers are as follows:
0283      *  %o registers: unknown (user's handler may have destroyed)
0284      *  %g1,g4,g5: scratch
0285      *  %g2: unknown: was trap vector
0286      *  %g3: unknown: was _Thread_Dispatch_Disable_level pointer
0287      *  %g6: _ISR_Nest_level
0288      *  %g7: interrupted task's sp
0289      */
0290 
0291     /*
0292      *  Decrement ISR nest level and Thread dispatch disable level.
0293      *
0294      *  Register usage for this section: (note: as used above)
0295      *
0296      *    g3 = _Thread_Dispatch_disable_level pointer
0297      *    g5 = _Thread_Dispatch_disable_level value
0298      *    g6 = _ISR_Nest_level pointer
0299      *    g4 = _ISR_Nest_level value
0300      *    o5 = temp
0301      */
0302 
0303     /* We have to re-load the values from memory, because there are
0304      * not enough registers that we know will be preserved across the
0305      * user's handler. If this is a problem, we can create a register
0306      * window for _ISR_Handler.
0307      */
0308 
0309     setx  THREAD_DISPATCH_DISABLE_LEVEL, %o5, %g3
0310     lduw  [%g3],%g5
0311     lduw  [%g6],%g4
0312     sub   %g5, 1, %g5
0313     stuw  %g5, [%g3]
0314     sub   %g4, 1, %g4
0315     stuw  %g4, [%g6]
0316 
0317     orcc  %g4, %g0, %g0           ! ISRs still nested?
0318     bnz   dont_restore_stack      ! Yes then don't restore stack yet
0319     nop
0320 
0321     /*
0322      *  This is the outermost interrupt handler. Need to get off the
0323      *  CPU Interrupt Stack and back to the tasks stack.
0324      *
0325      *  The following subtract should get us back on the interrupted
0326      *  tasks stack and add enough room to invoke the dispatcher.
0327      *  When we enable traps, we are mostly back in the context
0328      *  of the task and subsequent interrupts can operate normally.
0329      *
0330      *  Now %sp points to the bottom of the ISF.
0331      *  
0332      */
0333 
0334     sub      %g7,   CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
0335 
0336     dont_restore_stack:
0337 
0338     /*
0339      *  If dispatching is disabled (includes nested interrupt case),
0340      *  then do a "simple" exit.
0341      */
0342 
0343     orcc     %g5, %g0, %g0   ! Is dispatching disabled?
0344     bnz      simple_return   ! Yes, then do a "simple" exit
0345     ! NOTE: Use the delay slot
0346     mov      %g0, %g4  ! clear g4 for ofw
0347 
0348     ! Are we dispatching from a previous ISR in the interrupted thread?
0349     setx  SYM(_CPU_ISR_Dispatch_disable), %o5, %g5
0350     lduw     [%g5], %o5
0351     orcc     %o5, %g0, %g0   ! Is this thread already doing an ISR?
0352     bnz      simple_return   ! Yes, then do a "simple" exit
0353     nop
0354 
0355     setx    DISPATCH_NEEDED, %o5, %g7  ! safe to overwrite %g7 (interrupted sp): %sp was already restored from it above
0356 
0357 
0358     /*
0359      *  If a context switch is necessary, then do fudge stack to
0360      *  return to the interrupt dispatcher.
0361      */
0362 
0363     ldub     [%g7], %o5
0364 
0365     orcc     %o5, %g0, %g0   ! Is thread switch necessary?
0366     bz       simple_return   ! no, then do a simple return. otherwise fallthru
0367     nop
0368 
0369     /*
0370      *  Invoke interrupt dispatcher.
0371      */
0372 
0373     ! Set ISR dispatch nesting prevention flag
0374       mov      1, %o1
0375       setx     SYM(_CPU_ISR_Dispatch_disable), %o5, %o2
0376       stuw     %o1, [%o2]
0377 
0378 
0379       !  **** ENABLE TRAPS ****
0380       wrpr  %g0, SPARC_PSTATE_PRIV_MASK | SPARC_PSTATE_PEF_MASK | \
0381         SPARC_PSTATE_IE_MASK, %pstate
0382         isr_dispatch:
0383         call    SYM(_Thread_Dispatch), 0
0384         nop
0385 
0386         /*
0387          *  We invoked _Thread_Dispatch in a state similar to the interrupted
0388          *  task.  In order to safely be able to tinker with the register
0389          *  windows and get the task back to its pre-interrupt state, 
0390          *  we need to disable interrupts. 
0391          */
0392       mov   2, %g4        ! syscall (disable interrupts)
0393       ta    0             ! syscall (disable interrupts)
0394       mov   0, %g4        ! clear g4 again for ofw
0395 
0396   /*
0397    *  While we had ISR dispatching disabled in this thread,
0398    *  did we miss anything?  If so, then we need to do another
0399    *  _Thread_Dispatch before leaving this ISR Dispatch context.
0400    */
0401 
0402   setx     DISPATCH_NEEDED, %o5, %o1
0403   ldub     [%o1], %o2
0404 
0405   orcc     %o2, %g0, %g0   ! Is thread switch necessary?
0406   bz       allow_nest_again ! No, then clear out and return
0407   nop
0408 
0409   ! Yes, then invoke the dispatcher
0410 dispatchAgain:
0411   mov      3, %g4        ! syscall (enable interrupts)
0412   ta       0             ! syscall (enable interrupts)
0413   ba       isr_dispatch
0414   mov      0, %g4        ! delay slot: clear g4 for ofw
0415 
0416   allow_nest_again:
0417 
0418   ! Zero out ISR stack nesting prevention flag
0419   setx    SYM(_CPU_ISR_Dispatch_disable), %o5, %o1
0420   stuw    %g0,[%o1]
0421 
0422   /*
0423    *  The CWP in place at this point may be different from
0424    *  that which was in effect at the beginning of the ISR if we
0425    *  have been context switched between the beginning of this invocation
0426    *  of _ISR_Handler and this point.  Thus the CWP and WIM should
0427    *  not be changed back to their values at ISR entry time.  Any
0428    *  changes to the PSR must preserve the CWP.
0429    */
0430 
0431   simple_return:
0432   flushw          ! get register windows to a 'clean' state 
0433 
0434   ! **** DISABLE TRAPS ****
0435   wrpr    %g0, SPARC_PSTATE_PRIV_MASK, %pstate
0436 
0437   ldx     [%sp + STACK_BIAS + ISF_Y_OFFSET], %o1      ! restore y
0438   wr      %o1, 0, %y
0439 
0440   ldx  [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1
0441 
0442 ! see if cwp is proper (tstate.cwp == cwp)
0443   and  %g1, 0x1F, %g6    ! tstate.cwp is the low 5 bits
0444   rdpr  %cwp, %g7
0445   cmp  %g6, %g7
0446   bz  good_window
0447   nop
0448 
0449   /*
0450    * Fix the CWP. Need the cwp to be the proper cwp that
0451    * gets restored when returning from the trap via retry/done. Do 
0452    * this before reloading the task's output regs. Basically fake a
0453    * window spill/fill.
0454    *
0455    * Is this necessary on sun4v? Why not just re-write 
0456    * tstate.cwp to be equal to the current cwp?
0457    */
0458   mov  %sp, %g1
0459   stx  %l0, [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET]
0460   stx  %l1, [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET]
0461   stx  %l2, [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET]
0462   stx  %l3, [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET]
0463   stx  %l4, [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET]
0464   stx  %l5, [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET]
0465   stx  %l6, [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET]
0466   stx  %l7, [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET]
0467   stx  %i0, [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET]
0468   stx  %i1, [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET]
0469   stx  %i2, [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET]
0470   stx  %i3, [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET]
0471   stx  %i4, [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET]
0472   stx  %i5, [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET]
0473   stx  %i6, [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET]
0474   stx  %i7, [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET]
0475   wrpr  %g0, %g6, %cwp   ! switch to the window the restored tstate expects
0476   mov  %g1, %sp
0477   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L0_OFFSET], %l0
0478   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L1_OFFSET], %l1
0479   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L2_OFFSET], %l2
0480   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L3_OFFSET], %l3
0481   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L4_OFFSET], %l4
0482   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L5_OFFSET], %l5
0483   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L6_OFFSET], %l6
0484   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_L7_OFFSET], %l7
0485   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I0_OFFSET], %i0
0486   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I1_OFFSET], %i1
0487   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I2_OFFSET], %i2
0488   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I3_OFFSET], %i3
0489   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I4_OFFSET], %i4
0490   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I5_OFFSET], %i5
0491   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
0492   ldx  [%sp + STACK_BIAS + CPU_STACK_FRAME_I7_OFFSET], %i7
0493 
0494 
0495   good_window:
0496 
0497 
0498   /*
0499    *  Restore task's global and out registers
0500    */
0501 
0502   ldx     [%sp + STACK_BIAS + ISF_G1_OFFSET], %g1    ! restore g1
0503   ldx     [%sp + STACK_BIAS + ISF_G2_OFFSET], %g2    ! restore g2
0504   ldx     [%sp + STACK_BIAS + ISF_G3_OFFSET], %g3    ! restore g3
0505   ldx     [%sp + STACK_BIAS + ISF_G4_OFFSET], %g4    ! restore g4
0506   ldx     [%sp + STACK_BIAS + ISF_G5_OFFSET], %g5    ! restore g5
0507   ldx     [%sp + STACK_BIAS + ISF_G6_OFFSET], %g6    ! restore g6
0508   ldx     [%sp + STACK_BIAS + ISF_G7_OFFSET], %g7    ! restore g7
0509 
0510   ! Assume the interrupted context is in TL 0 with GL 0 / normal globals.
0511   ! When tstate is restored at done/retry, the interrupted context is restored.
0512   ! return to TL[1], GL[1], and restore TSTATE, TPC, and TNPC
0513   wrpr  %g0, 1, %tl
0514 
0515   ! return to GL=1 or AG
0516 #if defined(SUN4U)
0517     rdpr  %pstate, %o1
0518     or  %o1, SPARC_PSTATE_AG_MASK, %o1
0519     wrpr  %o1, %g0, %pstate                 ! go to AG.
0520 #elif defined(SUN4V)
0521   wrpr  %g0, 1, %gl
0522 #endif
0523 
0524 ! now we can use global registers (at gl=1 or AG) without clobbering the restored set
0525   ldx   [%sp + STACK_BIAS + ISF_PIL_OFFSET], %g3
0526   ldx   [%sp + STACK_BIAS + ISF_TPC_OFFSET], %g4
0527   ldx   [%sp + STACK_BIAS + ISF_TNPC_OFFSET], %g5
0528   ldx   [%sp + STACK_BIAS + ISF_TSTATE_OFFSET], %g1
0529   ldx   [%sp + STACK_BIAS + ISF_TVEC_OFFSET], %g2
0530   wrpr  %g0, %g3, %pil
0531   wrpr  %g0, %g4, %tpc
0532   wrpr  %g0, %g5, %tnpc
0533 
0534   wrpr    %g0, %g1, %tstate
0535 
0536   ldx     [%sp + STACK_BIAS + ISF_O0_OFFSET], %o0    ! restore o0
0537   ldx     [%sp + STACK_BIAS + ISF_O1_OFFSET], %o1    ! restore o1
0538   ldx     [%sp + STACK_BIAS + ISF_O2_OFFSET], %o2    ! restore o2
0539   ldx     [%sp + STACK_BIAS + ISF_O3_OFFSET], %o3    ! restore o3
0540   ldx     [%sp + STACK_BIAS + ISF_O4_OFFSET], %o4    ! restore o4
0541   ldx     [%sp + STACK_BIAS + ISF_O5_OFFSET], %o5    ! restore o5
0542   ! sp is restored later
0543   ldx     [%sp + STACK_BIAS + ISF_O7_OFFSET], %o7    ! restore o7
0544 
0545   ldx     [%sp + STACK_BIAS + ISF_O6_SP_OFFSET], %o6 ! restore o6/sp
0546 
0547   /*
0548    *  Determine whether to re-execute the trapping instruction 
0549    *  (asynchronous trap) or to skip the trapping instruction
0550    *  (synchronous trap).
0551    */
0552 
0553   andcc   %g2, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
0554   ! Is this a synchronous trap?
0555   be  not_synch             ! No, then skip trapping instruction. NOTE(review): async -> done, sync -> retry is the opposite of the usual V9 pairing -- confirm against the trap table's tpc/tnpc setup
0556   mov  0, %g4               ! delay slot (executes on both paths): clear g4 for ofw
0557   retry        ! re-execute trapping instruction
0558   not_synch:
0559   done        ! skip trapping instruction
0560 
0561 /* end of file */