/**
 * @file
 *
 * @ingroup ppc_exc
 *
 * @brief PowerPC Exceptions implementation.
 */

/*
 * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
 *                    Canon Centre Recherche France.
 *
 * Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu>
 *
 * Copyright (C) 2009, 2012 embedded brains GmbH & Co. KG
 *
 * Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
 * Derived from file "libcpu/powerpc/new-exceptions/e500_raw_exc_init.c".
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#include <rtems.h>

#include <bsp/vectors.h>
#include <bsp/fatal.h>

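/* Controls the write-back cache check at the end of
 * ppc_exc_initialize_with_vector_base().  BSPs which run entirely without
 * the cache may set this to zero before calling that routine.
 */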
uint32_t ppc_exc_cache_wb_check = 1;

static void ppc_exc_initialize_booke(void *vector_base)
{
  /* Interrupt vector prefix register */
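  /* On Book E, the IVPR provides the upper bits of every handler address;
   * the IVOR registers below provide the per-exception offsets, so the
   * vector base must be suitably aligned.
   */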
  ppc_mtivpr(vector_base);

  if (
    ppc_cpu_is_specific_e200(PPC_e200z0)
      || ppc_cpu_is_specific_e200(PPC_e200z1)
  ) {
    /*
     * These cores have hard-wired IVOR registers.  An access will cause a
     * program exception.
     */
    return;
  }

  /* Interrupt vector offset registers */
  ppc_mtivor(0,  ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR, vector_base));
  ppc_mtivor(1,  ppc_exc_vector_address(ASM_MACH_VECTOR, vector_base));
  ppc_mtivor(2,  ppc_exc_vector_address(ASM_PROT_VECTOR, vector_base));
  ppc_mtivor(3,  ppc_exc_vector_address(ASM_ISI_VECTOR, vector_base));
  ppc_mtivor(4,  ppc_exc_vector_address(ASM_EXT_VECTOR, vector_base));
  ppc_mtivor(5,  ppc_exc_vector_address(ASM_ALIGN_VECTOR, vector_base));
  ppc_mtivor(6,  ppc_exc_vector_address(ASM_PROG_VECTOR, vector_base));
  ppc_mtivor(7,  ppc_exc_vector_address(ASM_FLOAT_VECTOR, vector_base));
  ppc_mtivor(8,  ppc_exc_vector_address(ASM_SYS_VECTOR, vector_base));
  ppc_mtivor(9,  ppc_exc_vector_address(ASM_BOOKE_APU_VECTOR, vector_base));
  ppc_mtivor(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR, vector_base));
  ppc_mtivor(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR, vector_base));
  ppc_mtivor(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR, vector_base));
  ppc_mtivor(13, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR, vector_base));
  ppc_mtivor(14, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR, vector_base));
  ppc_mtivor(15, ppc_exc_vector_address(ASM_BOOKE_DEBUG_VECTOR, vector_base));
  if (ppc_cpu_is_e200() || ppc_cpu_is_e500()) {
    ppc_mtivor(32, ppc_exc_vector_address(ASM_E500_SPE_UNAVAILABLE_VECTOR, vector_base));
    ppc_mtivor(33, ppc_exc_vector_address(ASM_E500_EMB_FP_DATA_VECTOR, vector_base));
    ppc_mtivor(34, ppc_exc_vector_address(ASM_E500_EMB_FP_ROUND_VECTOR, vector_base));
  }
  if (ppc_cpu_is_specific_e200(PPC_e200z7) || ppc_cpu_is_e500()) {
    ppc_mtivor(35, ppc_exc_vector_address(ASM_E500_PERFMON_VECTOR, vector_base));
  }
}

static void ppc_exc_fatal_error(void)
{
  bsp_fatal(PPC_FATAL_EXCEPTION_INITIALIZATION);
}

void ppc_exc_initialize_with_vector_base(
  uintptr_t interrupt_stack_begin,
  void *vector_base
)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  const ppc_exc_categories *const categories = ppc_exc_current_categories();
  unsigned vector = 0;
  uint32_t sda_base = 0;
  uint32_t r13 = 0;

  if (categories == NULL) {
    ppc_exc_fatal_error();
  }

  /* The assembly code needs _SDA_BASE_ in r13 (SVR4 or EABI).  Make sure
   * the early initialization code put it there.
   */
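  /* Materialize the linker-provided _SDA_BASE_ address and read the
   * current contents of r13 so that the two can be compared below.
   */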
  __asm__ volatile (
    "lis %0, _SDA_BASE_@h\n"
    "ori %0, %0, _SDA_BASE_@l\n"
    "mr  %1, 13\n"
    : "=r" (sda_base), "=r" (r13)
  );

  if (sda_base != r13) {
    ppc_exc_fatal_error();
  }

  ppc_exc_initialize_interrupt_stack(interrupt_stack_begin);

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY

  /* Use current MMU / RI settings when running C exception handlers */
  ppc_exc_msr_bits = ppc_machine_state_register() & (MSR_DR | MSR_IR | MSR_RI);

#ifdef __ALTIVEC__
  /* Need vector unit enabled to save/restore altivec context */
  ppc_exc_msr_bits |= MSR_VE;
#endif

#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */

  if (ppc_cpu_is_bookE() == PPC_BOOKE_STD || ppc_cpu_is_bookE() == PPC_BOOKE_E500) {
    ppc_exc_initialize_booke(vector_base);
  }

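  /* Install a prologue for each valid vector: generate a small entry stub
   * for the vector's category and copy it to the vector address.
   */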
  for (vector = 0; vector <= LAST_VALID_EXC; ++vector) {
    ppc_exc_category category = ppc_exc_category_for_vector(categories, vector);

    if (category != PPC_EXC_INVALID) {
      void *const vector_address = ppc_exc_vector_address(vector, vector_base);
      uint32_t prologue[16];
      size_t prologue_size = sizeof(prologue);

      sc = ppc_exc_make_prologue(
        vector,
        vector_base,
        category,
        prologue,
        &prologue_size
      );
      if (sc != RTEMS_SUCCESSFUL) {
        ppc_exc_fatal_error();
      }

      ppc_code_copy(vector_address, prologue, prologue_size);
    }
  }

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
  /* If we are on a classic PPC with MSR_DR enabled, then assert that the
   * mapping for at least this task's stack is write-back-caching enabled
   * (see README/CAVEATS).  Do this only if the cache is physically
   * enabled.  Since it is not easy to figure that out in a generic way,
   * we need help from the BSP: BSPs which run entirely without the cache
   * may set ppc_exc_cache_wb_check to zero prior to calling this routine.
   *
   * We run this check only after exception handling is initialized, so
   * that we have some chance to get information printed if it fails.
   *
   * Note that it is unsafe to ignore this issue; if the check fails, do
   * NOT disable it unless caches are always physically disabled.
   */
  if (ppc_exc_cache_wb_check && (MSR_DR & ppc_exc_msr_bits)) {
    /* The size of 63 assumes cache lines are at most 32 bytes */
    uint8_t dummy[63];
    uintptr_t p = (uintptr_t) dummy;
    /* If the dcbz instruction raises an alignment exception, then the
     * stack is mapped as write-through or caching-disabled.  The
     * low-level code is not capable of dealing with this at the moment.
     */
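    /* Round the buffer address up to the next 32-byte boundary so that
     * dcbz targets a cache line that lies entirely within dummy[].
     */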
    p = (p + 31U) & ~31U;
    __asm__ volatile ("dcbz 0, %0" : : "b" (p));
    /* If we make it through here then things seem to be OK */
  }
#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
}
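
For context, a minimal sketch of how a BSP's startup code might drive this
routine, assuming the relevant declarations come from <bsp/vectors.h>; the
function and symbol names (bsp_start_exception_handling,
_ISR_Stack_area_begin, bsp_exc_vector_area) are illustrative assumptions,
not part of this file:

#include <bsp/vectors.h>

/* Hypothetical BSP startup fragment; all names below are assumptions. */
void bsp_start_exception_handling(void)
{
  extern char _ISR_Stack_area_begin[]; /* assumed linker-provided stack begin */
  extern char bsp_exc_vector_area[];   /* assumed, suitably aligned vector area */

  /* A BSP which runs entirely without the cache could disable the
   * write-back check first, as described in the comment above the check.
   */
  ppc_exc_cache_wb_check = 0;

  ppc_exc_initialize_with_vector_base(
    (uintptr_t) _ISR_Stack_area_begin,
    bsp_exc_vector_area
  );
}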