/* SPDX-License-Identifier: BSD-2-Clause */

/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken <craigl@transition.com>
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore <alanc@linuxstart.com> to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill <joel@OARcorp.com> continued this rework,
 *          rewriting as much as possible in C and added the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke <gregory.menke@gsfc.nasa.gov>, bench tested ISR
 *          performance, tweaking this code and the isr vectoring routines
 *          to reduce overhead & latencies.  Added optional
 *          instrumentation as well.
 *    2002: Greg Menke <gregory.menke@gsfc.nasa.gov>, overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
 *          and deferred FP contexts.
 *    2002: Joel Sherrill <joel@OARcorp.com> enhanced the exception processing
 *          by increasing the amount of context saved/restored.
 *    2004: 24March, Art Ferrer, NASA/GSFC, added save of FP status/control
 *          register to fix intermittent FP error encountered on ST5 mission
 *          implementation on Mongoose V processor.
 *    2004: April 7, Greg Menke <gregory.menke@gsfc.nasa.gov> Added __mips==32
 *          support for R4000 processors running 32 bit code.  Fixed #define
 *          problems that caused fpu code to always be included even when no
 *          fpu is present.
 *
 *  COPYRIGHT (c) 1989-2002.
 *  On-Line Applications Research Corporation (OAR).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>
#include <rtems/mips/iregdef.h>
#include <rtems/mips/idtcpu.h>
#include <rtems/score/percpu.h>

#define ASSEMBLY_ONLY
#include <rtems/score/cpu.h>

#if TRUE
#else
#error TRUE is not true
#endif
#if FALSE
#error FALSE is not false
#else
#endif

/*
#if ( CPU_HARDWARE_FP == TRUE )
#warning CPU_HARDWARE_FP == TRUE
#else
#warning CPU_HARDWARE_FP != TRUE
#endif
*/


/* enable debugging shadow writes to misc ram, this is a vestigial
* Mongoose-ism debug tool, but may be handy in the future so we
* left it in...
*/

/* #define INSTRUMENT_ISR_VECTORING */
/* #define INSTRUMENT_EXECUTING_THREAD */



/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP nop
#define ADD dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0       /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
#define MTCO    dmtc0       /* Only use this op for coprocessor registers that are 64 bit in R4000 architecture */
#define ADDU    addu
#define ADDIU   addiu
#if (__mips_fpr==32)
#define STREGC1 swc1
#define LDREGC1 lwc1
#elif (__mips_fpr==64)      /* Use these instructions if there are 64 bit floating point registers. This requires FR bit to be set in C0_SR */
#define STREGC1 sdc1
#define LDREGC1 ldc1
#endif
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif (__mips == 1 ) || (__mips == 32)
/* 32 bit register operations*/
#define NOP nop
#define ADD add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define STREGC1 swc1
#define LDREGC1 lwc1
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif

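/* Illustration only (implied by the defines above, nothing new): with
** __mips == 1 or __mips == 32, "LDREG s0,0(a1)" assembles as the 32 bit
** "lw s0,0(a1)", while with __mips == 3 the same macro expands to the
** 64 bit "ld s0,0(a1)".
*/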

#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12
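
/* As a reading aid (a sketch only; the authoritative definition lives in
** cpu.h, and the field names and reg_t type here are assumed), the offsets
** above imply a Context_Control laid out roughly as:
**
**     typedef struct {
**         reg_t s0, s1, s2, s3, s4, s5, s6, s7;   -- callee-saved regs
**         reg_t sp, fp, ra;                       -- stack/frame/return
**         reg_t c0_sr;                            -- saved status register
**         reg_t c0_epc;                           -- saved/synthesized PC
**     } Context_Control;
**
** where reg_t is R_SZ bytes wide, so e.g. "STREG ra,RA_OFFSET*R_SZ(a0)"
** below stores ra at byte offset 10*R_SZ.
*/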

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31
#define FPCS_OFFSET 32
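
/* Worked example, following directly from the defines above: with 32 bit FP
** registers (__mips_fpr==32, F_SZ==4), "STREGC1 $f10,FP10_OFFSET*F_SZ(a1)"
** stores f10 at byte offset 40; with __mips_fpr==64 (F_SZ==8) it lands at
** byte offset 80, and the FP status/control word sits at 32*F_SZ either way.
*/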


ASM_EXTERN(__exceptionStackFrame, SZ_INT)

/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */
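
/* Illustration only (the actual choice is made in cpu.h, not here): the
** macro form mentioned above would look something like
**
**     #define _CPU_Context_save_fp( _fp_context_ptr ) \
**         _CPU_Context_save_fp_context( *(_fp_context_ptr) )
**
** i.e. cpu.h dereferences the ** and the routine in this file then receives
** a (Context_Control_fp *) directly.
*/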

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noreorder
        .set noat

    /*
    ** Make sure the FPU is on before we save state.  This code
    ** is here because the FPU context switch might occur when an
    ** integer task is switching out with an FP task switching in.
    */
    mfc0    t0,C0_SR
    li  t2,SR_CU1
    move    t1,t0
    or  t0,t2       /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
    li  t2,SR_IE
#elif __mips == 1
    li  t2,SR_IEC
#endif
    not t2
    and t0,t2       /* turn off interrupts */
    mtc0    t0,C0_SR

    lw  a1,(a0)     /* get address of context storage area */
    move    t0,ra
    jal _CPU_Context_save_fp_from_exception
    NOP

    /*
    ** Reassert the task's state because we've not saved it yet.
    */
    mtc0    t1,C0_SR
    j   t0
    NOP

    .globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        STREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        STREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        STREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        STREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        STREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        STREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        STREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        STREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        STREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        STREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        STREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        STREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        STREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        STREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        STREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        STREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        STREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        STREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        STREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        STREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        STREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        STREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        STREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        STREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        STREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        STREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        STREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        STREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        STREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        STREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        STREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        STREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                    /* Read FP status/control reg */
        cfc1 a0,$31                    /* Two reads clear pipeline */
        NOP
        NOP
        sw a0, FPCS_OFFSET*F_SZ(a1)    /* Store value to FPCS location */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        .set noreorder

    /*
    ** Make sure the FPU is on before we retrieve state.  This code
    ** is here because the FPU context switch might occur when an
    ** integer task is switching out with an FP task switching in.
    */
    mfc0    t0,C0_SR
    li  t2,SR_CU1
    move    t1,t0
    or  t0,t2       /* turn on the fpu */
#if (__mips == 3) || (__mips == 32)
    li  t2,SR_IE
#elif __mips == 1
    li  t2,SR_IEC
#endif
    not t2
    and t0,t2       /* turn off interrupts */
    mtc0    t0,C0_SR

    lw  a1,(a0)     /* get address of context storage area */
    move    t0,ra
    jal _CPU_Context_restore_fp_from_exception
    NOP

    /*
    ** Reassert the old task's state because we've not restored the
    ** new one yet.
    */
    mtc0    t1,C0_SR
    j   t0
    NOP

    .globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        LDREGC1 $f0,FP0_OFFSET*F_SZ(a1)
        LDREGC1 $f1,FP1_OFFSET*F_SZ(a1)
        LDREGC1 $f2,FP2_OFFSET*F_SZ(a1)
        LDREGC1 $f3,FP3_OFFSET*F_SZ(a1)
        LDREGC1 $f4,FP4_OFFSET*F_SZ(a1)
        LDREGC1 $f5,FP5_OFFSET*F_SZ(a1)
        LDREGC1 $f6,FP6_OFFSET*F_SZ(a1)
        LDREGC1 $f7,FP7_OFFSET*F_SZ(a1)
        LDREGC1 $f8,FP8_OFFSET*F_SZ(a1)
        LDREGC1 $f9,FP9_OFFSET*F_SZ(a1)
        LDREGC1 $f10,FP10_OFFSET*F_SZ(a1)
        LDREGC1 $f11,FP11_OFFSET*F_SZ(a1)
        LDREGC1 $f12,FP12_OFFSET*F_SZ(a1)
        LDREGC1 $f13,FP13_OFFSET*F_SZ(a1)
        LDREGC1 $f14,FP14_OFFSET*F_SZ(a1)
        LDREGC1 $f15,FP15_OFFSET*F_SZ(a1)
        LDREGC1 $f16,FP16_OFFSET*F_SZ(a1)
        LDREGC1 $f17,FP17_OFFSET*F_SZ(a1)
        LDREGC1 $f18,FP18_OFFSET*F_SZ(a1)
        LDREGC1 $f19,FP19_OFFSET*F_SZ(a1)
        LDREGC1 $f20,FP20_OFFSET*F_SZ(a1)
        LDREGC1 $f21,FP21_OFFSET*F_SZ(a1)
        LDREGC1 $f22,FP22_OFFSET*F_SZ(a1)
        LDREGC1 $f23,FP23_OFFSET*F_SZ(a1)
        LDREGC1 $f24,FP24_OFFSET*F_SZ(a1)
        LDREGC1 $f25,FP25_OFFSET*F_SZ(a1)
        LDREGC1 $f26,FP26_OFFSET*F_SZ(a1)
        LDREGC1 $f27,FP27_OFFSET*F_SZ(a1)
        LDREGC1 $f28,FP28_OFFSET*F_SZ(a1)
        LDREGC1 $f29,FP29_OFFSET*F_SZ(a1)
        LDREGC1 $f30,FP30_OFFSET*F_SZ(a1)
        LDREGC1 $f31,FP31_OFFSET*F_SZ(a1)
        cfc1 a0,$31                  /* Read from FP status/control reg */
        cfc1 a0,$31                  /* Two reads clear pipeline */
        NOP                          /* NOPs ensure execution */
        NOP
        lw a0,FPCS_OFFSET*F_SZ(a1)   /* Load saved FPCS value */
        NOP
        ctc1 a0,$31                  /* Restore FPCS register */
        NOP
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */

FRAME(_CPU_Context_switch,sp,0,ra)
        .set noreorder

        mfc0    t0,C0_SR
#if (__mips == 3) || (__mips == 32)
    li  t1,SR_IE
#elif __mips == 1
    li  t1,SR_IEC
#endif
    STREG   t0,C0_SR_OFFSET*R_SZ(a0)    /* save the task's SR */
    not t1
        and t0,t1               /* mask off interrupts while we context switch */
        mtc0    t0,C0_SR
    NOP

        STREG ra,RA_OFFSET*R_SZ(a0)     /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)


    /*
    ** this code grabs the userspace EPC if we're dispatching from
    ** an interrupt frame or supplies the address of the dispatch
    ** routines if not.  This is entirely for the gdbstub's benefit so
    ** it can know where each task is running.
    **
    ** Its value is only set when calling threadDispatch from
    ** the interrupt handler and is cleared immediately when this
    ** routine gets it.
    */
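
    /*
    ** (For illustration: a stub inspecting a switched-out task would read
    ** this saved value back from the task's Context_Control at byte offset
    ** C0_EPC_OFFSET*R_SZ, per the offsets defined at the top of this file.)
    */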

    la  t0,__exceptionStackFrame    /* see if we're coming in from an exception */
    LDREG   t1, (t0)
    NOP
    beqz    t1,1f

    STREG   zero, (t0)          /* and clear it */
    NOP
    LDREG   t0,R_EPC*R_SZ(t1)       /* get the userspace EPC from the frame */
    b   2f
    NOP

1:  la      t0,_Thread_Dispatch     /* if ==0, we're switched out */

2:  STREG   t0,C0_EPC_OFFSET*R_SZ(a0)


_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)     /* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

/*  NOP */
/*#if (__mips == 3) || (__mips == 32) */
/*        andi  t0,SR_EXL */
/*        bnez  t0,_CPU_Context_1 */   /* set exception level from restore context */
/*        li    t0,~SR_EXL */
/*        MFC0  t1,C0_SR */
/*        NOP */
/*        and   t1,t0 */
/*        MTC0  t1,C0_SR */
/* */
/*#elif __mips == 1 */
/* */
/*        andi  t0,(SR_INTERRUPT_ENABLE_BITS) */ /* we know 0 disabled */
/*        beq   t0,$0,_CPU_Context_1  */         /* set level from restore context */
/*        MFC0  t0,C0_SR */
/*        NOP */
/*        or    t0,(SR_INTERRUPT_ENABLE_BITS) */ /* new_sr = old sr with enabled  */
/*        MTC0  t0,C0_SR */                     /* set with enabled */
/*    NOP */


/*
** Incorporate the incoming task's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump thru the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the task's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a task
** must itself manipulate the SR register.
**
** Although something of a hack on this processor, we treat the SR register
** int enables as the RTEMS interrupt level.  We use the int level
** value as a bitmask, not as any sort of greater than/less than metric.
** Manipulation of a task's interrupt level corresponds directly to manipulation
** of that task's SR bits, as seen in cpu.c
**
** Note, interrupts are disabled before context is saved, though the task's
** interrupt enable state is recorded.  The task swapping in will apply its
** specific SR bits, including interrupt enable.  If further task-specific
** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
** cpu.h task initialization code that will be affected.
*/
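
/* Concretely, from the code below: the per-task portion of SR is
** SR_CU1 | SR_IMASK plus the int enable bits (SR_IE on MIPS3/32, or
** SR_IEC | SR_IEP | SR_IEO on MIPS1); every other SR bit is treated as a
** global value and carried over from the current SR unchanged.
*/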

    li  t2,SR_CU1
    or  t2,SR_IMASK

    /* int enable bits */
#if (__mips == 3) || (__mips == 32)
    /*
    ** Save IE
    */
    or  t2,SR_IE
#elif __mips == 1
    /*
    ** Save current, previous & old int enables.  This is key because
    ** we can dispatch from within the stack frame used by an
    ** interrupt service.  The int enables nest, but not beyond
    ** previous and old because of the dispatch interlock seen
    ** in the interrupt processing code.
    */
    or  t2,SR_IEC + SR_IEP + SR_IEO
#endif
    and t0,t2       /* keep only the per-task bits */

    mfc0    t1,C0_SR    /* grab the current SR */
    not t2
    and t1,t2       /* mask off the old task's per-task bits */
    or  t1,t0       /* or in the new task's bits */
        mtc0    t1,C0_SR    /* and load the new SR */
    NOP

/* _CPU_Context_1: */
        j   ra
        NOP
ENDFRAME(_CPU_Context_switch)


/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        .set noreorder
        move    a1,a0
        j   _CPU_Context_switch_restore
        NOP

ENDFRAME(_CPU_Context_restore)

.extern _Thread_Dispatch

/*  void _DBG_Handler()
 *
 *  This routine services the (at least) MIPS1 debug vector,
 *  only used by the hardware debugging features.  This code,
 *  while optional, is best located here because it's intrinsically
 *  associated with exceptions in general & thus tied pretty
 *  closely to _ISR_Handler.
 */
FRAME(_DBG_Handler,sp,0,ra)
        .set noreorder
    la  k0,_ISR_Handler
    j   k0
    NOP
    .set reorder
ENDFRAME(_DBG_Handler)

/*  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler; the requirement below for making the vector
 *  number available constrains the rest.
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branches to _ISR_Handler.
 */
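
/* For orientation only: the flow below saves a partial register frame on
** the stack, then hands that frame to the CPU model/BSP decode routine,
** roughly "mips_vector_isr_handlers( frame )" with a0 = sp as done below;
** that routine (whose C prototype lives in the CPU model support code, not
** here) examines the pending causes and invokes the user's C ISR handlers.
*/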

FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
    ADDIU    sp,sp,-EXCP_STACK_SIZE

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
        mflo  t0
        STREG t8, R_T8*R_SZ(sp)
        STREG t0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  t0
        STREG gp, R_GP*R_SZ(sp)
        STREG t0, R_MDHI*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)

        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        mfc0     t0,C0_SR
    MFCO     t1,C0_EPC
        STREG    t0,R_SR*R_SZ(sp)
        STREG    t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT_EXECUTING_THREAD
    lw t2, THREAD_EXECUTING
    NOP
    sw t2, 0x8001FFF0
#endif

    /* determine if an interrupt generated this exception */

        mfc0     t0,C0_CAUSE
    NOP

    and      t1,t0,CAUSE_EXCMASK
        beq      t1, 0, _ISR_Handler_1

_ISR_Handler_Exception:

    /*  If we return from the exception, it is assumed nothing
     *  bad is going on and we can continue to run normally.
     *  But we want to save the entire CPU context so exception
     *  handlers can look at it and change it.
     *
     *  NOTE: This is the path the debugger stub will take.
     */

    /* already got t0 = cause in the interrupt test above */
        STREG    t0,R_CAUSE*R_SZ(sp)

        STREG    sp, R_SP*R_SZ(sp)

        STREG    s0,R_S0*R_SZ(sp)     /* save s0 - s7 */
        STREG    s1,R_S1*R_SZ(sp)
        STREG    s2,R_S2*R_SZ(sp)
        STREG    s3,R_S3*R_SZ(sp)
        STREG    s4,R_S4*R_SZ(sp)
        STREG    s5,R_S5*R_SZ(sp)
        STREG    s6,R_S6*R_SZ(sp)
        STREG    s7,R_S7*R_SZ(sp)

        /* CP0 special registers */

#if __mips == 1
    mfc0     t0,C0_TAR
#endif
        MFCO     t1,C0_BADVADDR

#if __mips == 1
        STREG    t0,R_TAR*R_SZ(sp)
#else
    NOP
#endif
        STREG    t1,R_BADVADDR*R_SZ(sp)

#if ( CPU_HARDWARE_FP == TRUE )
        mfc0     t0,C0_SR                 /* FPU is enabled, save state */
    NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 1f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_save_fp_from_exception
        NOP
        mfc1     t0,C1_REVISION
        mfc1     t1,C1_STATUS
        STREG    t0,R_FEIR*R_SZ(sp)
        STREG    t1,R_FCSR*R_SZ(sp)

1:
#endif

    move     a0,sp
        jal  mips_vector_exceptions
    NOP


    /*
    ** Note, if the exception vector returns, rely on it to have
    ** adjusted EPC so we will return to some correct address.  If
    ** this is not done, we might get stuck in an infinite loop because
    ** we'll return to the instruction where the exception occurred and
    ** it could throw again.
    **
    ** It is expected the only code using the exception processing is
    ** either the gdb stub or some user code which is either going to
    ** panic or do something useful.  Regardless, it is up to each
    ** exception routine to properly adjust EPC, so the code below
    ** may be helpful for doing just that.
    */

/* *********************************************************************
** this code follows the R3000's exception return logic, but is not
** needed because the gdb stub does it for us.  It might be useful
** for something else at some point...
**
    * compute the address of the instruction we'll return to *

    LDREG   t1, R_CAUSE*R_SZ(sp)
    LDREG   t0, R_EPC*R_SZ(sp)

    * first see if the exception happened in the delay slot *
    li  t3,CAUSE_BD
    AND t4,t1,t3
    beqz    t4,excnodelay
    NOP

    * it did, now see if the branch occurred or not *
    li  t3,CAUSE_BT
    AND t4,t1,t3
    beqz    t4,excnobranch
    NOP

    * branch was taken, we resume at the branch target *
    LDREG   t0, R_TAR*R_SZ(sp)
    j   excreturn
    NOP

excnobranch:
    ADDU    t0,R_SZ

excnodelay:
    ADDU    t0,R_SZ

excreturn:
    STREG   t0, R_EPC*R_SZ(sp)
    NOP
********************************************************************* */


 /* if we're returning into mips_break, move to the next instruction */

        LDREG   t0,R_EPC*R_SZ(sp)
    la  t1,mips_break
    xor t2,t0,t1
    bnez    t2,3f

    addu    t0,R_SZ
    STREG   t0,R_EPC*R_SZ(sp)
    NOP
3:




#if ( CPU_HARDWARE_FP == TRUE )
        mfc0     t0,C0_SR               /* FPU is enabled, restore state */
    NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 2f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_restore_fp_from_exception
        NOP
        LDREG    t0,R_FEIR*R_SZ(sp)
        LDREG    t1,R_FCSR*R_SZ(sp)
        mtc1     t0,C1_REVISION
        mtc1     t1,C1_STATUS
2:
#endif
        LDREG    s0,R_S0*R_SZ(sp)    /* restore s0 - s7 */
        LDREG    s1,R_S1*R_SZ(sp)
        LDREG    s2,R_S2*R_SZ(sp)
        LDREG    s3,R_S3*R_SZ(sp)
        LDREG    s4,R_S4*R_SZ(sp)
        LDREG    s5,R_S5*R_SZ(sp)
        LDREG    s6,R_S6*R_SZ(sp)
        LDREG    s7,R_S7*R_SZ(sp)

        /* do NOT restore the sp as this could mess up the world */
        /* do NOT restore the cause as this could mess up the world */

    /*
    ** Jump all the way out.  If there's a pending interrupt, just
    ** let it be serviced later.  Since we're probably using the
    ** gdb stub, we've already disrupted the ISR service timing
    ** anyhow.  We oughtn't mix exception and interrupt processing
    ** in the same exception call in case the exception stuff
    ** might interfere with the dispatching & timer ticks.
    */
    j    _ISR_Handler_exit
    NOP

_ISR_Handler_1:

        mfc0     t1,C0_SR
        and      t0,CAUSE_IPMASK
        and      t0,t1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* then where did it come from??? */

    beq      t0,zero,_ISR_Handler_exit
    NOP


  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  if ( _ISR_Nest_level == 0 )
   *    switch to software interrupt stack
   */


  /*
   *  _ISR_Nest_level++;
   */
        lw  t0,ISR_NEST_LEVEL
    NOP
        add t0,t0,1
        sw  t0,ISR_NEST_LEVEL
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        lw  t1,THREAD_DISPATCH_DISABLE_LEVEL
    NOP
        add t1,t1,1
        sw  t1,THREAD_DISPATCH_DISABLE_LEVEL

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

#ifdef INSTRUMENT_ISR_VECTORING
    NOP
    li  t1, 1
    sw  t1, 0x8001e000
#endif

    move     a0,sp
        jal      mips_vector_isr_handlers
        NOP

#ifdef INSTRUMENT_ISR_VECTORING
    li  t1, 0
    sw  t1, 0x8001e000
    NOP
#endif

  /*
   *  --_ISR_Nest_level;
   */
        lw  t2,ISR_NEST_LEVEL
    NOP
        add t2,t2,-1
        sw  t2,ISR_NEST_LEVEL
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        lw  t1,THREAD_DISPATCH_DISABLE_LEVEL
    NOP
        add t1,t1,-1
        sw  t1,THREAD_DISPATCH_DISABLE_LEVEL
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        NOP


  /*
   *  restore stack
   *
   *  if !_Thread_Dispatch_necessary
   *    goto the label "exit interrupt (simple case)"
   */
        lbu t0,DISPATCH_NEEDED
    NOP
        or  t0,t0,t0
        beq t0,zero,_ISR_Handler_exit
        NOP



#ifdef INSTRUMENT_EXECUTING_THREAD
    lw  t0,THREAD_EXECUTING
    NOP
    sw  t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering Thread_Dispatch which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the Thread_Dispatch_disable_level interlock
** that prevents recursive entry into Thread_Dispatch.
*/
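
/* In C terms the interlock is roughly (a sketch, not the actual kernel
** source):
**
**     _Thread_Dispatch_disable_level++;    -- done above
**     enable_interrupts();                 -- the mtc0 below
**     _Thread_Dispatch();                  -- nested interrupts see a
**                                             nonzero disable level and
**                                             skip dispatching
*/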

        mfc0    t0, C0_SR
#if __mips == 1

    li  t1,SR_IEC
    or  t0, t1

#elif (__mips == 3) || (__mips == 32)

    /*
    ** clear EXL and set IE so we can get interrupts.
    */
    li  t1, SR_EXL
    not t1
    and t0,t1
    or  t0, SR_IE

#endif
        mtc0    t0, C0_SR
    NOP

    /* save off our stack frame so the context switcher can get to it */
    la  t0,__exceptionStackFrame
    STREG   sp,(t0)

        jal     _Thread_Dispatch
        NOP

    /*
    ** And make sure it's clear in case we didn't dispatch.  If we did, it's
    ** already cleared.
    */
    la  t0,__exceptionStackFrame
    STREG   zero,(t0)
    NOP

/*
** turn interrupts back off while we restore context so
** a badly timed interrupt won't mess things up
*/
        mfc0    t0, C0_SR

#if __mips == 1

    /* ints off, current & prev kernel mode on (kernel mode enabled is bit clear..argh!) */
    li  t1,SR_IEC | SR_KUP | SR_KUC
    not t1
    and t0, t1
        mtc0    t0, C0_SR
    NOP

#elif (__mips == 3) || (__mips == 32)

    /* make sure EXL and IE are set so ints are disabled & we can update EPC for the return */
        li   t1,SR_IE       /* Clear IE first (recommended) */
        not  t1
        and  t0,t1
        mtc0 t0,C0_SR
    NOP

    /* apply task's SR with EXL set so the eret will return properly */
    or  t0, SR_EXL | SR_IE
    mtc0    t0, C0_SR
    NOP

    /* store new EPC value, which is safe now that EXL is set and ints are off */
        LDREG   t0, R_EPC*R_SZ(sp)
    NOP
    MTCO    t0, C0_EPC
    NOP

#endif






#ifdef INSTRUMENT_EXECUTING_THREAD
    lw  t0,THREAD_EXECUTING
    NOP
    sw  t0,0x8001FFF8
#endif


  /*
   *  prepare to get out of interrupt
   *  return from interrupt
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register.  _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changed, so all we need to do is return.
*/
    /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
    lw  t0,THREAD_EXECUTING
    NOP
    sw  t0, 0x8001FFFC
#endif

        LDREG t8, R_MDLO*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        mtlo  t8
        LDREG t8, R_MDHI*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        mthi  t8
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)

#if __mips == 1
    LDREG     k1, R_EPC*R_SZ(sp)
#endif

    .set noat
        LDREG     AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE

#if (__mips == 3) || (__mips == 32)
    eret
#elif __mips == 1
    j         k1
    rfe
#endif
        NOP

       .set    reorder
ENDFRAME(_ISR_Handler)


FRAME(mips_break,sp,0,ra)
        .set noreorder
    break   0x0 /* this statement must be first in this function, assumed so by mips-stub.c */
    NOP
        j   ra
        NOP
       .set    reorder
ENDFRAME(mips_break)