/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @brief SPARC64 CPU Dependent Source
 */

/*
 * COPYRIGHT (c) 1989-2007. On-Line Applications Research Corporation (OAR).
 *
 * This file is based on the SPARC cpu.c file. Modifications are made to
 * provide support for the SPARC-v9.
 * COPYRIGHT (c) 2010. Gedare Bloom.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rtems/score/cpuimpl.h>
#include <rtems/score/isr.h>
#include <rtems/score/tls.h>
#include <rtems/rtems/cache.h>

#if (SPARC_HAS_FPU == 1)
Context_Control_fp _CPU_Null_fp_context;
#endif

volatile uint32_t _CPU_ISR_Dispatch_disable;

/*
 *  _CPU_Initialize
 *
 *  This routine performs processor dependent initialization.
 *
 *  Input Parameters: NONE
 *
 *  Output Parameters: NONE
 *
 *  NOTE: There is no need to save the pointer to the thread dispatch routine.
 *        The SPARC's assembly code can reference it directly with no problems.
 */

void _CPU_Initialize(void)
{
#if (SPARC_HAS_FPU == 1)
  Context_Control_fp *pointer;

  /*
   *  This seems to be the most appropriate way to obtain an initial
   *  FP context on the SPARC.  The NULL fp context is copied into
   *  the task's FP context during Context_Initialize_fp.
   */

  pointer = &_CPU_Null_fp_context;
  _CPU_Context_save_fp( &pointer );

#endif

  /*
   *  Since no tasks have been created yet and no interrupts have occurred,
   *  there is no way that the currently executing thread can have an
   *  interrupt stack frame on its stack.
   */
  _CPU_ISR_Dispatch_disable = 0;
}

void _CPU_Context_Initialize(
  Context_Control  *the_context,
  void             *stack_base,
  uint32_t          size,
  uint32_t          new_level,
  void             *entry_point,
  bool              is_fp,
  void             *tls_area
)
{
  uint64_t stack_high;  /* highest "stack aligned" address */

  /*
   *  On CPUs with stacks which grow down (i.e. SPARC), we build the stack
   *  based on the stack_high address.
   */

  stack_high = ((uint64_t)(stack_base) + size);
  stack_high &= ~(CPU_STACK_ALIGNMENT - 1);

  /*
   *  See the README in this directory for a diagram of the stack.
   */

  the_context->o7    = ((uint64_t) entry_point) - 8;
  the_context->o6_sp = stack_high - SPARC64_MINIMUM_STACK_FRAME_SIZE - STACK_BIAS;
  the_context->i6_fp = 0;
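
  /*
   *  A note on the SPARC conventions behind the three assignments above:
   *  a SPARC return transfers control to the return address register plus
   *  eight, so seeding %o7 with entry_point - 8 makes the first "return"
   *  out of the context switch land exactly on entry_point.  The initial
   *  stack pointer sits one minimum stack frame (the ABI's register window
   *  save area) below the aligned stack top, and SPARC V9 additionally uses
   *  a biased stack pointer: the real frame address is %sp + STACK_BIAS
   *  (2047 in the V9 ABI), hence the extra subtraction.
   */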

  /* ABI uses g4 as segment register, make sure it is zeroed */
  the_context->g4 = 0;

  /* PSTATE used to be built here, but is no longer included in context */

  /*
   *  Since THIS thread is being created, there is no way that THIS
   *  thread can have an interrupt stack frame on its stack.
   */
  the_context->isr_dispatch_disable = 0;

  if ( tls_area != NULL ) {
    void *tcb = _TLS_Initialize_area( tls_area );

    the_context->g7 = (uintptr_t) tcb;
  }
}

/*
 *  This initializes the set of opcodes placed in each trap
 *  table entry.  The routine which installs a handler is responsible
 *  for filling in the fields for the _handler address and the _vector
 *  trap type.
 *
 *  The constants following this structure are masks for the fields which
 *  must be filled in when the handler is installed.
 */

/*  64-bit registers complicate this.  Also, in SPARC V9,
 *  each trap level gets its own set of global registers, but
 *  does not get its own dedicated register window.  So we avoid
 *  using the local registers in the trap handler.
 */
const CPU_Trap_table_entry _CPU_Trap_slot_template = {
  0x89508000,  /* rdpr   %tstate, %g4            */
  0x05000000,  /* sethi  %hh(_handler), %g2      */
  0x8410a000,  /* or     %g2, %hm(_handler), %g2 */
  0x8528b020,  /* sllx   %g2, 32, %g2            */
  0x07000000,  /* sethi  %hi(_handler), %g3      */
  0x8610c002,  /* or     %g3, %g2, %g3           */
  0x81c0e000,  /* jmp    %g3 + %lo(_handler)     */
  0x84102000   /* mov    _vector, %g2            */
};
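
/*
 *  Worked example of how this template assembles a 64-bit handler address
 *  (the address below is made up purely for illustration).  sethi loads a
 *  22-bit immediate into bits 31:10 of its destination, and the assembler's
 *  %hh/%hm/%hi/%lo operators select bits 63:42, 41:32, 31:10 and 9:0 of
 *  _handler, matching the masks used in _CPU_ISR_install_raw_handler below.
 *
 *  For _handler = 0x00000FFFF0101234:
 *
 *    sethi %hh(_handler), %g2       %g2 = 0x3 << 10      = 0x0000000000000C00
 *    or    %g2, %hm(_handler), %g2  %g2 |= 0x3FF         = 0x0000000000000FFF
 *    sllx  %g2, 32, %g2             %g2                  = 0x00000FFF00000000
 *    sethi %hi(_handler), %g3       %g3 = 0x3C0404 << 10 = 0x00000000F0101000
 *    or    %g3, %g2, %g3            %g3                  = 0x00000FFFF0101000
 *    jmp   %g3 + %lo(_handler)      jumps to %g3 + 0x234 = 0x00000FFFF0101234
 *
 *  The rdpr saves %tstate into %g4 for the handler, and the mov in the jmp's
 *  delay slot hands the vector number to the handler in %g2.
 */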

/*
 *  _CPU_ISR_Get_level
 *
 *  Input Parameters: NONE
 *
 *  Output Parameters:
 *    returns the current interrupt level (the PIL)
 */
uint32_t _CPU_ISR_Get_level( void )
{
  uint32_t level;

  sparc64_get_interrupt_level( level );

  return level;
}

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs the specified handler as a "raw" non-executive
 *  supported trap handler (a.k.a. interrupt service routine).
 *
 *  Input Parameters:
 *    vector      - trap table entry number plus synchronous
 *                    vs. asynchronous information
 *    new_handler - address of the handler to be installed
 *    old_handler - pointer to an address of the handler previously installed
 *
 *  Output Parameters:
 *    *old_handler - address of the handler previously installed
 *
 *  NOTE:
 *
 *  On the SPARC v9, there are really only 512 vectors.  However, the executive
 *  has no easy, fast, reliable way to determine which traps are synchronous
 *  and which are asynchronous.  By default, traps return to the
 *  instruction which caused the interrupt.  So if you install a software
 *  trap handler as an executive interrupt handler (which is desirable since
 *  RTEMS takes care of window and register issues), then the executive needs
 *  to know that the return address is to the trap rather than the instruction
 *  following the trap.
 *
 *  So vectors 0 through 511 are treated as regular asynchronous traps which
 *  provide the "correct" return address.  Vectors 512 through 1023 are assumed
 *  by the executive to be synchronous and to require that the return be to the
 *  trapping instruction.
 *
 *  If you use this mechanism to install a trap handler which must reexecute
 *  the instruction which caused the trap, then it should be installed as
 *  a synchronous trap.  This will avoid the executive changing the return
 *  address.
 */
void _CPU_ISR_install_raw_handler(
  uint32_t             vector,
  CPU_ISR_raw_handler  new_handler,
  CPU_ISR_raw_handler *old_handler
)
{
  uint32_t              real_vector;
  CPU_Trap_table_entry *tba;
  CPU_Trap_table_entry *slot;
  uint64_t              u64_tba;
  uint64_t              u64_handler;

  /*
   *  Get the "real" trap number for this vector ignoring the synchronous
   *  versus asynchronous indicator included with our vector numbers.
   */

  real_vector = SPARC_REAL_TRAP_NUMBER( vector );

  /*
   *  Get the current base address of the trap table and calculate a pointer
   *  to the slot we are interested in.
   */

  sparc64_get_tba( u64_tba );

  /* u32_tbr &= 0xfffff000; */
  u64_tba &= 0xffffffffffff8000;  /* keep only trap base address */

  tba = (CPU_Trap_table_entry *) u64_tba;

  /* use array indexing to fill in lower bits -- require
   * CPU_Trap_table_entry to be full-sized. */
  slot = &tba[ real_vector ];

  /*
   *  Get the address of the old_handler from the trap table.
   *
   *  NOTE: The old_handler returned will be bogus if it does not follow
   *        the RTEMS model.
   */

/* shift amount for the hi bits (31:10) */
#define HI_BITS_SHIFT  10

/* shift amount for the hm bits (41:32) */
#define HM_BITS_SHIFT  32

/* shift amount for the hh bits (63:42) */
#define HH_BITS_SHIFT  42

/* We're only interested in bits 0-9 of the immediate field */
#define IMM_MASK  0x000003FF

  if ( slot->rdpr_tstate_g4 == _CPU_Trap_slot_template.rdpr_tstate_g4 ) {
    u64_handler =
      (((uint64_t)((slot->sethi_of_hh_handler_to_g2 << HI_BITS_SHIFT) |
      (slot->or_g2_hm_handler_to_g2 & IMM_MASK))) << HM_BITS_SHIFT) |
      ((slot->sethi_of_handler_to_g3 << HI_BITS_SHIFT) |
      (slot->jmp_to_low_of_handler_plus_g3 & IMM_MASK));
    *old_handler = (CPU_ISR_raw_handler) u64_handler;
  } else
    *old_handler = 0;

  /*
   *  Copy the template to the slot and then fix it.
   */

  *slot = _CPU_Trap_slot_template;

  u64_handler = (uint64_t) new_handler;

/* mask for extracting %hh */
#define HH_BITS_MASK  0xFFFFFC0000000000

/* mask for extracting %hm */
#define HM_BITS_MASK  0x000003FF00000000

/* mask for extracting %hi */
#define HI_BITS_MASK  0x00000000FFFFFC00

/* mask for extracting %lo */
#define LO_BITS_MASK  0x00000000000003FF

  slot->mov_vector_g2 |= vector;
  slot->sethi_of_hh_handler_to_g2 |=
    (u64_handler & HH_BITS_MASK) >> HH_BITS_SHIFT;
  slot->or_g2_hm_handler_to_g2 |=
    (u64_handler & HM_BITS_MASK) >> HM_BITS_SHIFT;
  slot->sethi_of_handler_to_g3 |=
    (u64_handler & HI_BITS_MASK) >> HI_BITS_SHIFT;
  slot->jmp_to_low_of_handler_plus_g3 |= (u64_handler & LO_BITS_MASK);

  /* need to flush icache after this !!! */

  /* need to flush icache in case old trap handler is in cache */
  rtems_cache_invalidate_entire_instruction();

}
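
/*
 *  Usage sketch (kept out of the build): installing a raw handler for a
 *  trap that must re-execute the trapping instruction.  Per the NOTE above,
 *  such a handler is registered under its real trap number plus 512 so that
 *  the executive leaves the return address pointing at the trapping
 *  instruction.  The trap type and handler name below are hypothetical.
 */
#if 0
  CPU_ISR_raw_handler previous_raw;

  _CPU_ISR_install_raw_handler(
    0x30 + 512,        /* hypothetical trap type 0x30, marked synchronous */
    my_fault_handler,  /* hypothetical raw trap handler */
    &previous_raw
  );
#endif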

/*
 *  _CPU_ISR_install_vector
 *
 *  This kernel routine installs the RTEMS handler for the
 *  specified vector.
 *
 *  Input parameters:
 *    vector       - interrupt vector number
 *    new_handler  - replacement ISR for this vector number
 *    old_handler  - pointer to former ISR for this vector number
 *
 *  Output parameters:
 *    *old_handler - former ISR for this vector number
 */
void _CPU_ISR_install_vector(
  uint32_t         vector,
  CPU_ISR_handler  new_handler,
  CPU_ISR_handler *old_handler
)
{
  uint64_t real_vector;
  CPU_ISR_raw_handler ignored;

  /*
   *  Get the "real" trap number for this vector ignoring the synchronous
   *  versus asynchronous indicator included with our vector numbers.
   */
  real_vector = SPARC_REAL_TRAP_NUMBER( vector );

  /*
   *  Return the previous ISR handler.
   */

  *old_handler = _ISR_Vector_table[ vector ];

  /*
   *  Install the wrapper so this ISR can be invoked properly.
   */

  _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );

  /*
   *  We put the actual user ISR address in '_ISR_Vector_table'.  This will
   *  be used by the _ISR_Handler so the user gets control.
   */

  _ISR_Vector_table[ real_vector ] = new_handler;
}
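
/*
 *  Usage sketch (kept out of the build): hooking an RTEMS-managed ISR
 *  through this port routine.  The vector number and handler name are
 *  hypothetical, and the prototype assumes CPU_ISR_handler receives the
 *  vector number; applications normally install interrupt handlers through
 *  the higher-level RTEMS interrupt APIs rather than the port layer.
 */
#if 0
  void my_timer_isr( uint32_t vector );  /* hypothetical ISR */

  CPU_ISR_handler previous;

  _CPU_ISR_install_vector(
    0x4E,          /* hypothetical interrupt trap vector */
    my_timer_isr,
    &previous
  );
#endif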