Back to home page

LXR

 
 

    


File indexing completed on 2025-05-11 08:23:53

0001 /* SPDX-License-Identifier: BSD-2-Clause */
0002 
0003 /**
0004  * @file
0005  *
0006  * @ingroup RTEMSBSPsPowerPCShared
0007  *
0008  * @brief General purpose assembler macros, linker command file support and
0009  * some inline functions for direct register access.
0010  */
0011 
0012 /*
0013  * Copyright (C) 2008, 2015 embedded brains GmbH & Co. KG
0014  *
0015  * access function for Device Control Registers inspired by "ppc405common.h"
0016  * from Michael Hamel ADInstruments May 2008
0017  *
0018  * Redistribution and use in source and binary forms, with or without
0019  * modification, are permitted provided that the following conditions
0020  * are met:
0021  * 1. Redistributions of source code must retain the above copyright
0022  *    notice, this list of conditions and the following disclaimer.
0023  * 2. Redistributions in binary form must reproduce the above copyright
0024  *    notice, this list of conditions and the following disclaimer in the
0025  *    documentation and/or other materials provided with the distribution.
0026  *
0027  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
0028  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
0029  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
0030  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
0031  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
0032  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
0033  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0034  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0035  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
0036  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
0037  * POSSIBILITY OF SUCH DAMAGE.
0038  */
0039 
0040 /**
0041  * @defgroup RTEMSBSPsPowerPCSharedUtility PowerPC Utility Module
0042  *
0043  * @ingroup RTEMSBSPsPowerPCShared
0044  *
0045  * @brief PowerPC Utility Module.
0046  *
0047  * @{
0048  */
0049 
0050 #ifndef __LIBCPU_POWERPC_UTILITY_H
0051 #define __LIBCPU_POWERPC_UTILITY_H
0052 
0053 #if !defined(ASM)
0054   #include <rtems.h>
0055 #endif
0056 
0057 #include <rtems/score/cpu.h>
0058 #include <rtems/powerpc/registers.h>
0059 #include <rtems/powerpc/powerpc.h>
0060 
0061 #ifdef __cplusplus
0062 extern "C" {
0063 #endif
0064 
0065 #if !defined(ASM)
0066 
0067 #include <rtems/bspIo.h>
0068 
0069 #include <libcpu/cpuIdent.h>
0070 
/** @brief Declares the linker command file symbol @a sym for use as an address constant. */
#define LINKER_SYMBOL(sym) extern char sym [];
0072 
/**
 * @brief Read one byte from @a src.
 *
 * The "b" constraint forces a base register other than r0 (r0 reads as the
 * literal zero in the D-form load/store addressing mode).
 */
static inline uint8_t ppc_read_byte(const volatile void *src)
{
  uint8_t value;

  __asm__ volatile (
    "lbz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Read one half word from @a src.
 */
static inline uint16_t ppc_read_half_word(const volatile void *src)
{
  uint16_t value;

  __asm__ volatile (
    "lhz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Read one word from @a src.
 */
static inline uint32_t ppc_read_word(const volatile void *src)
{
  uint32_t value;

  __asm__ volatile (
    "lwz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Write one byte @a value to @a dest.
 */
static inline void ppc_write_byte(uint8_t value, volatile void *dest)
{
  __asm__ volatile (
    "stb %0, 0(%1)"
    :
    : "r" (value), "b" (dest)
  );
}

/**
 * @brief Write one half word @a value to @a dest.
 */
static inline void ppc_write_half_word(uint16_t value, volatile void *dest)
{
  __asm__ volatile (
    "sth %0, 0(%1)"
    :
    : "r" (value), "b" (dest)
  );
}

/**
 * @brief Write one word @a value to @a dest.
 */
static inline void ppc_write_word(uint32_t value, volatile void *dest)
{
  __asm__ volatile (
    "stw %0, 0(%1)" :
    : "r" (value), "b" (dest)
  );
}
0155 
/** @brief Returns the current stack pointer (GPR1 by PowerPC ABI convention). */
static inline void *ppc_stack_pointer(void)
{
  void *sp;

  __asm__ volatile (
    "mr %0, 1"
    : "=r" (sp)
  );

  return sp;
}

/** @brief Sets the stack pointer (GPR1) to @a sp. */
static inline void ppc_set_stack_pointer(void *sp)
{
  __asm__ volatile (
    "mr 1, %0"
    :
    : "r" (sp)
  );
}

/** @brief Returns the current value of the Link Register (LR). */
static inline void *ppc_link_register(void)
{
  void *lr;

  __asm__ volatile (
    "mflr %0"
    : "=r" (lr)
  );

  return lr;
}

/** @brief Sets the Link Register (LR) to @a lr. */
static inline void ppc_set_link_register(void *lr)
{
  __asm__ volatile (
    "mtlr %0"
    :
    : "r" (lr)
  );
}

/** @brief Returns the current value of the Machine State Register (MSR). */
static inline uint32_t ppc_machine_state_register(void)
{
  uint32_t msr;

  __asm__ volatile (
    "mfmsr %0"
    : "=r" (msr)
  );

  return msr;
}

/** @brief Sets the Machine State Register (MSR) to @a msr. */
static inline void ppc_set_machine_state_register(uint32_t msr)
{
  __asm__ volatile (
    "mtmsr %0"
    :
    : "r" (msr)
  );
}

/** @brief Data synchronization barrier (sync) preceded by a compiler barrier. */
static inline void ppc_synchronize_data(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile ("sync");
}

/** @brief Light-weight data synchronization barrier (lwsync) with a compiler barrier. */
static inline void ppc_light_weight_synchronize(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile ("lwsync");
}

/** @brief Instruction synchronization barrier (isync) with a compiler barrier. */
static inline void ppc_synchronize_instructions(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile ("isync");
}

/**
 * @brief Enforce in-order execution of I/O accesses (eieio).
 *
 * The .machine push/any/pop directives let the eieio mnemonic assemble even
 * when the selected target machine does not accept it by default.
 */
static inline void ppc_enforce_in_order_execution_of_io(void)
{
  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    ".machine \"push\"\n"
    ".machine \"any\"\n"
    "eieio\n"
    ".machine \"pop\""
  );
}
0252 
/** @brief Flushes (writes back and invalidates) the data cache block containing @a addr (dcbf). */
static inline void ppc_data_cache_block_flush(void *addr)
{
  __asm__ volatile (
    "dcbf 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

/** @brief Flushes the data cache block at effective address @a base + @a offset (dcbf). */
static inline void ppc_data_cache_block_flush_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbf %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

/** @brief Invalidates the data cache block containing @a addr without write back (dcbi). */
static inline void ppc_data_cache_block_invalidate(void *addr)
{
  __asm__ volatile (
    "dcbi 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

/** @brief Invalidates the data cache block at effective address @a base + @a offset (dcbi). */
static inline void ppc_data_cache_block_invalidate_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbi %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

/** @brief Stores (writes back without invalidating) the data cache block containing @a addr (dcbst). */
static inline void ppc_data_cache_block_store(const void *addr)
{
  __asm__ volatile (
    "dcbst 0, %0"
    :
    : "r" (addr)
  );
}

/** @brief Stores the data cache block at effective address @a base + @a offset (dcbst). */
static inline void ppc_data_cache_block_store_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbst %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

/** @brief Prefetch hint for a load of the data cache block containing @a addr (dcbt). */
static inline void ppc_data_cache_block_touch(const void *addr)
{
  __asm__ volatile (
    "dcbt 0, %0"
    :
    : "r" (addr)
  );
}

/** @brief Prefetch hint for a load of the block at effective address @a base + @a offset (dcbt). */
static inline void ppc_data_cache_block_touch_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbt %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

/** @brief Prefetch hint for a store to the data cache block containing @a addr (dcbtst). */
static inline void ppc_data_cache_block_touch_for_store(const void *addr)
{
  __asm__ volatile (
    "dcbtst 0, %0"
    :
    : "r" (addr)
  );
}

/** @brief Prefetch hint for a store to the block at effective address @a base + @a offset (dcbtst). */
static inline void ppc_data_cache_block_touch_for_store_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbtst %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

/** @brief Zeroes the entire data cache block containing @a addr (dcbz). */
static inline void ppc_data_cache_block_clear_to_zero(void *addr)
{
  __asm__ volatile (
    "dcbz 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

/** @brief Zeroes the data cache block at effective address @a base + @a offset (dcbz). */
static inline void ppc_data_cache_block_clear_to_zero_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbz %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

/** @brief Invalidates the instruction cache block containing @a addr (icbi). */
static inline void ppc_instruction_cache_block_invalidate(void *addr)
{
  __asm__ volatile (
    "icbi 0, %0"
    :
    : "r" (addr)
  );
}

/** @brief Invalidates the instruction cache block at effective address @a base + @a offset (icbi). */
static inline void ppc_instruction_cache_block_invalidate_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "icbi %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}
0405 
/**
 * @brief Enables external exceptions.
 *
 * You can use this function to enable the external exceptions and restore the
 * machine state with ppc_external_exceptions_disable() later.
 *
 * @return The MSR value before MSR[EE] (bit mask 0x8000) was set.
 */
static inline uint32_t ppc_external_exceptions_enable(void)
{
  uint32_t current_msr;
  uint32_t new_msr;

  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    "mfmsr %0;"
    "ori %1, %0, 0x8000;"
    "mtmsr %1"
    : "=r" (current_msr), "=r" (new_msr)
  );

  return current_msr;
}

/**
 * @brief Restores machine state.
 *
 * @param msr The value previously returned by ppc_external_exceptions_enable().
 *
 * @see ppc_external_exceptions_enable()
 */
static inline void ppc_external_exceptions_disable(uint32_t msr)
{
  ppc_set_machine_state_register(msr);

  RTEMS_COMPILER_MEMORY_BARRIER();
}

/** @brief Counts the leading zero bits of @a value (cntlzw); returns 32 for a zero input. */
static inline uint32_t ppc_count_leading_zeros(uint32_t value)
{
  uint32_t count;

  __asm__ (
    "cntlzw %0, %1;"
    : "=r" (count)
    : "r" (value)
  );

  return count;
}
0453 
/*
 *  Simple spin delay in microsecond units for device drivers.
 *  This is very dependent on the clock speed of the target.
 */

#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
/* Wonderful bookE doesn't have mftb/mftbu; they only
 * define the TBRU/TBRL SPRs so we use these. Luckily,
 * we run in supervisory mode so that should work on
 * all CPUs. In user mode we'd have a problem...
 * 2007/11/30, T.S.
 *
 * OTOH, PSIM currently lacks support for reading
 * SPRs 268/269. You need GDB patch sim/2376 to avoid
 * a crash...
 * OTOH, the MPC8xx do not allow to read the timebase registers via mfspr.
 * we NEED a mftb to access the time base.
 * 2009/10/30 Th. D.
 */
/* Reads the lower 32 bits of the time base into _value via mftb */
#define CPU_Get_timebase_low( _value ) \
    __asm__ volatile( "mftb  %0" : "=r" (_value) )
#else
/* Reads the lower 32 bits of the time base into _value via SPR 268 (TBL) */
#define CPU_Get_timebase_low( _value ) \
    __asm__ volatile( "mfspr %0,268" : "=r" (_value) )
#endif

/* Must be provided for rtems_bsp_delay to work */
extern     uint32_t bsp_clicks_per_usec;

/* Busy-waits for _microseconds by polling the time base; unsigned
 * subtraction (now - start) stays correct across time base wrap-around. */
#define rtems_bsp_delay( _microseconds ) \
  do { \
    uint32_t   start, ticks, now; \
    CPU_Get_timebase_low( start ) ; \
    ticks = (_microseconds) * bsp_clicks_per_usec; \
    do \
      CPU_Get_timebase_low( now ) ; \
    while (now - start < ticks); \
  } while (0)

/* Busy-waits for _cycles time base ticks */
#define rtems_bsp_delay_in_bus_cycles( _cycles ) \
  do { \
    uint32_t   start, now; \
    CPU_Get_timebase_low( start ); \
    do \
      CPU_Get_timebase_low( now ); \
    while (now - start < (_cycles)); \
  } while (0)
0501 
/*
 *  Routines to access the decrementer register
 */

/* Loads the decrementer (DEC) with _clicks via mtdec */
#define PPC_Set_decrementer( _clicks ) \
  do { \
    __asm__ volatile( "mtdec %0" : : "r" ((_clicks)) ); \
  } while (0)

/* Reads the decrementer (DEC) into _clicks via mfdec */
#define PPC_Get_decrementer( _clicks ) \
    __asm__ volatile( "mfdec  %0" : "=r" (_clicks) )

/*
 *  Routines to access the time base register
 */

/**
 * @brief Returns the full 64-bit time base value.
 *
 * The upper half is read twice around the lower half read; the loop repeats
 * until both upper reads agree, which guards against a carry from the lower
 * into the upper half between the reads.
 */
static inline uint64_t PPC_Get_timebase_register( void )
{
  uint32_t tbr_low;
  uint32_t tbr_high;
  uint32_t tbr_high_old;
  uint64_t tbr;

  do {
#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
/* See comment above (CPU_Get_timebase_low) */
    __asm__ volatile( "mftbu %0" : "=r" (tbr_high_old));
    __asm__ volatile( "mftb  %0" : "=r" (tbr_low));
    __asm__ volatile( "mftbu %0" : "=r" (tbr_high));
#else
    __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high_old));
    __asm__ volatile( "mfspr %0, 268" : "=r" (tbr_low));
    __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high));
#endif
  } while ( tbr_high_old != tbr_high );

  tbr = tbr_high;
  tbr <<= 32;
  tbr |= tbr_low;
  return tbr;
}

/**
 * @brief Sets the 64-bit time base via SPR 284 (TBWL) and SPR 285 (TBWU).
 *
 * NOTE(review): the lower half is written before the upper half; a time base
 * tick between the two writes could carry into the just-overwritten upper
 * half — acceptable only if callers ensure the time base is stopped or the
 * race is tolerable.  TODO confirm against the BSP start-up code.
 */
static inline  void PPC_Set_timebase_register (uint64_t tbr)
{
  uint32_t tbr_low;
  uint32_t tbr_high;

  tbr_low = (uint32_t) tbr;
  tbr_high = (uint32_t) (tbr >> 32);
  __asm__ volatile( "mtspr 284, %0" : : "r" (tbr_low));
  __asm__ volatile( "mtspr 285, %0" : : "r" (tbr_high));

}

/** @brief Returns the current decrementer (DEC) value. */
static inline uint32_t ppc_decrementer_register(void)
{
  uint32_t dec;

  PPC_Get_decrementer(dec);

  return dec;
}

/** @brief Sets the decrementer (DEC) to @a dec. */
static inline void ppc_set_decrementer_register(uint32_t dec)
{
  PPC_Set_decrementer(dec);
}
0569 
/**
 * @brief Preprocessor magic for stringification of @a x.
 */
#define PPC_STRINGOF(x) #x

/**
 * @brief Returns the value of the Special Purpose Register with number @a spr
 * in @a val.
 *
 * The SPR number must be a compile-time constant because it is pasted into
 * the mfspr instruction text.
 *
 * @note This macro uses a GNU C extension.
 */
#define PPC_SPECIAL_PURPOSE_REGISTER(spr, val) \
  __asm__ volatile (\
    "mfspr %0, " PPC_STRINGOF(spr) \
    : "=r" (val) \
  )

/**
 * @brief Sets the Special Purpose Register with number @a spr to the value in
 * @a val.
 */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val) \
  do { \
    __asm__ volatile (\
      "mtspr " PPC_STRINGOF(spr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)

/**
 * @brief Sets in the Special Purpose Register with number @a spr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation (read-modify-write).
 */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
    val |= mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Sets in the Special Purpose Register with number @a spr all bits
 * which are set in @a bits.  The previous register value will be masked with
 * @a mask.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS_MASKED(spr, bits, mask) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    uint32_t mymask = mask; \
    _ISR_Local_disable(level); \
    PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
    val &= ~mymask; \
    val |= mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Clears in the Special Purpose Register with number @a spr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_CLEAR_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
    val &= ~mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)
0655 
/**
 * @brief Returns the value of the Thread Management Register with number @a tmr.
 *
 * @note This macro uses a GNU C extension (statement expression).
 */
#define PPC_THREAD_MGMT_REGISTER(tmr) \
  ({ \
    uint32_t val; \
    __asm__ volatile (\
      "mftmr %0, " PPC_STRINGOF(tmr) \
      : "=r" (val) \
    ); \
    val;\
  } )

/**
 * @brief Sets the Thread Management Register with number @a tmr to the value in
 * @a val.
 */
#define PPC_SET_THREAD_MGMT_REGISTER(tmr, val) \
  do { \
    __asm__ volatile (\
      "mttmr " PPC_STRINGOF(tmr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)

/**
 * @brief Returns the value of the Device Control Register with number @a dcr.
 *
 * The PowerPC 4XX family has Device Control Registers.
 *
 * @note This macro uses a GNU C extension (statement expression).
 */
#define PPC_DEVICE_CONTROL_REGISTER(dcr) \
  ({ \
    uint32_t val; \
    __asm__ volatile (\
      "mfdcr %0, " PPC_STRINGOF(dcr) \
      : "=r" (val) \
    ); \
    val;\
  } )

/**
 * @brief Sets the Device Control Register with number @a dcr to the value in
 * @a val.
 *
 * The PowerPC 4XX family has Device Control Registers.
 */
#define PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val) \
  do { \
    __asm__ volatile (\
      "mtdcr " PPC_STRINGOF(dcr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)
0715 
/**
 * @brief Sets in the Device Control Register with number @a dcr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation (read-modify-write).
 */
#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val |= mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Sets in the Device Control Register with number @a dcr all bits
 * which are set in @a bits.  The previous register value will be masked with
 * @a mask.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS_MASKED(dcr, bits, mask) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    uint32_t mymask = mask; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val &= ~mymask; \
    val |= mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/**
 * @brief Clears in the Device Control Register with number @a dcr all bits
 * which are set in @a bits.
 *
 * Interrupts are disabled throughout this operation.
 */
#define PPC_CLEAR_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val &= ~mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)
0772 
/** @brief Returns the lower 32 bits of the time base. */
static inline uint32_t ppc_time_base(void)
{
  uint32_t val;

  CPU_Get_timebase_low(val);

  return val;
}

/** @brief Writes @a val to the lower time base write register (TBWL). */
static inline void ppc_set_time_base(uint32_t val)
{
  PPC_SET_SPECIAL_PURPOSE_REGISTER(TBWL, val);
}

/** @brief Returns the upper 32 bits of the time base (TBRU). */
static inline uint32_t ppc_time_base_upper(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(TBRU, val);
  return val;
}

/** @brief Writes @a val to the upper time base write register (TBWU). */
static inline void ppc_set_time_base_upper(uint32_t val)
{
  PPC_SET_SPECIAL_PURPOSE_REGISTER(TBWU, val);
}

/** @brief Returns the full 64-bit time base value. */
static inline uint64_t ppc_time_base_64(void)
{
  return PPC_Get_timebase_register();
}

/** @brief Sets the full 64-bit time base value. */
static inline void ppc_set_time_base_64(uint64_t val)
{
  PPC_Set_timebase_register(val);
}

/** @brief Returns the lower 32 bits of the Freescale alternate time base (ATBL). */
static inline uint32_t ppc_alternate_time_base(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBL, val);
  return val;
}

/** @brief Returns the upper 32 bits of the Freescale alternate time base (ATBU). */
static inline uint32_t ppc_alternate_time_base_upper(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBU, val);
  return val;
}

/**
 * @brief Returns the full 64-bit alternate time base.
 *
 * Re-reads until the upper half is stable, guarding against a carry from the
 * lower half between the reads.
 */
static inline uint64_t ppc_alternate_time_base_64(void)
{
  uint32_t atbl;
  uint32_t atbu_0;
  uint32_t atbu_1;

  do {
    atbu_0 = ppc_alternate_time_base_upper();
    atbl = ppc_alternate_time_base();
    atbu_1 = ppc_alternate_time_base_upper();
  } while (atbu_0 != atbu_1);

  return (((uint64_t) atbu_1) << 32) | ((uint64_t) atbl);
}

/** @brief Returns the Book E Processor ID Register (PIR) value. */
static inline uint32_t ppc_processor_id(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR, val);
  return val;
}

/** @brief Sets the Book E Processor ID Register (PIR) to @a val. */
static inline void ppc_set_processor_id(uint32_t val)
{
  PPC_SET_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR, val);
}

/** @brief Returns the Freescale System Version Register (SVR) value. */
static inline uint32_t ppc_fsl_system_version(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_SVR, val);
  return val;
}
0856 
/** @brief Extracts the company identifier field (bits 31..28) from the SVR value @a svr. */
static inline uint32_t ppc_fsl_system_version_cid(uint32_t svr)
{
  return (svr & UINT32_C(0xf0000000)) >> 28;
}

/** @brief Extracts the system identifier field (bits 27..16) from the SVR value @a svr. */
static inline uint32_t ppc_fsl_system_version_sid(uint32_t svr)
{
  return (svr & UINT32_C(0x0fff0000)) >> 16;
}

/** @brief Extracts the process field (bits 15..12) from the SVR value @a svr. */
static inline uint32_t ppc_fsl_system_version_proc(uint32_t svr)
{
  return (svr & UINT32_C(0x0000f000)) >> 12;
}

/** @brief Extracts the manufacturing field (bits 11..8) from the SVR value @a svr. */
static inline uint32_t ppc_fsl_system_version_mfg(uint32_t svr)
{
  return (svr & UINT32_C(0x00000f00)) >> 8;
}

/** @brief Extracts the major revision field (bits 7..4) from the SVR value @a svr. */
static inline uint32_t ppc_fsl_system_version_mjrev(uint32_t svr)
{
  return (svr & UINT32_C(0x000000f0)) >> 4;
}

/** @brief Extracts the minor revision field (bits 3..0) from the SVR value @a svr. */
static inline uint32_t ppc_fsl_system_version_mnrev(uint32_t svr)
{
  return svr & UINT32_C(0x0000000f);
}
0886 
/** @brief Memory synchronization barrier (Book E msync). */
static inline void ppc_msync(void)
{
  __asm__ volatile (
    "msync"
    :
    :
    : "memory"
  );
}

/** @brief Reads a TLB entry selected by the MMU assist registers (tlbre). */
static inline void ppc_tlbre(void)
{
  __asm__ volatile (
    "tlbre"
    :
    :
    : "memory"
  );
}

/** @brief Writes a TLB entry from the MMU assist registers (tlbwe). */
static inline void ppc_tlbwe(void)
{
  __asm__ volatile (
    "tlbwe"
    :
    :
    : "memory"
  );
}

/** @brief Searches the TLB for the entry mapping @a addr (tlbsx). */
static inline void ppc_tlbsx(void *addr)
{
  __asm__ volatile (
    "tlbsx 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

/** @brief Sets the Interrupt Vector Prefix Register (IVPR) to @a prefix. */
static inline void ppc_mtivpr(void *prefix)
{
  __asm__ volatile (
    "mtivpr %0"
    :
    : "r" (prefix)
  );
}

/* Sets Interrupt Vector Offset Register x (IVORx) to vec; x must be a
 * literal number since it is pasted into the mnemonic */
#define ppc_mtivor(x, vec) __asm__ volatile ( \
    "mtivor" RTEMS_XSTRING(x) " %0" \
    : \
    : "r" (vec) \
  )

/* Copies n bytes of code from src to dest and synchronizes the caches */
void ppc_code_copy(void *dest, const void *src, size_t n);

/* FIXME: Do not use this function */
void printBAT(int bat, uint32_t upper, uint32_t lower);

/* FIXME: Do not use this function */
void ShowBATS(void);
0949 
0950 #endif /* ifndef ASM */
0951 
0952 #if defined(ASM)
0953 #include <rtems/asm.h>
0954 
/* Loads the address \addr into \reg (full 64-bit sequence on powerpc64) */
.macro LA reg, addr
#if defined(__powerpc64__)
    lis \reg, (\addr)@highest
    ori \reg, \reg, (\addr)@higher
    rldicr  \reg, \reg, 32, 31
    oris    \reg, \reg, (\addr)@h
    ori \reg, \reg, (\addr)@l
#else
    lis \reg, (\addr)@h
    ori \reg, \reg, (\addr)@l
#endif
.endm

/* Loads the 32-bit address \addr into \reg */
.macro LA32 reg, addr
    lis \reg, (\addr)@h
    ori \reg, \reg, (\addr)@l
.endm

/* Loads the 32-bit immediate \value into \reg */
.macro LWI reg, value
    lis \reg, (\value)@h
    ori \reg, \reg, (\value)@l
.endm

/* Loads the word stored at address \addr into \reg */
.macro LW reg, addr
    lis \reg, \addr@ha
    lwz \reg, \addr@l(\reg)
.endm

/*
 * Tests the bits in reg1 against the bits set in mask.  A match is indicated
 * by EQ = 0 in CR0.  A mismatch is indicated by EQ = 1 in CR0.  The register
 * reg2 is used to load the mask.
 */
.macro  TSTBITS reg1, reg2, mask
    LWI     \reg2, \mask
    and \reg1, \reg1, \reg2
    cmplw   \reg1, \reg2
.endm

/* Sets in \reg1 all bits set in \mask; \reg2 is used to load the mask */
.macro  SETBITS reg1, reg2, mask
    LWI     \reg2, \mask
    or  \reg1, \reg1, \reg2
.endm

/* Clears in \reg1 all bits set in \mask; \reg2 is used to load the mask */
.macro  CLRBITS reg1, reg2, mask
    LWI     \reg2, \mask
    andc    \reg1, \reg1, \reg2
.endm
1003 
/* Declares and defines the global function entry point \name */
.macro GLOBAL_FUNCTION name
    .global \name
    .type \name, @function
\name:
.endm

/*
 * Obtain interrupt mask
 */
.macro GET_INTERRUPT_MASK mask
    lis \mask, _PPC_INTERRUPT_DISABLE_MASK@h
    ori \mask, \mask, _PPC_INTERRUPT_DISABLE_MASK@l
.endm

/*
 * Disables all asynchronous exceptions (interrupts) which may cause a context
 * switch.  The previous MSR is returned in \level for a later
 * INTERRUPT_ENABLE; \mask is clobbered.
 */
.macro INTERRUPT_DISABLE level, mask
    mfmsr   \level
    GET_INTERRUPT_MASK mask=\mask
    andc    \mask, \level, \mask
    mtmsr   \mask
.endm

/*
 * Restore previous machine state saved in \level by INTERRUPT_DISABLE.
 */
.macro INTERRUPT_ENABLE level
    mtmsr   \level
.endm
1035 
/* Initializes the per-CPU control register from the Book E PIR (SMP only);
 * \reg_0 and \reg_1 are scratch registers */
.macro SET_SELF_CPU_CONTROL reg_0, reg_1
#if defined(RTEMS_SMP)
    /* Use Book E Processor ID Register (PIR) */
    mfspr   \reg_0, 286
    slwi    \reg_0, \reg_0, PER_CPU_CONTROL_SIZE_LOG2
#if defined(__powerpc64__)
    LA  \reg_1, _Per_CPU_Information
    add \reg_0, \reg_0, \reg_1
#else
    addis   \reg_0, \reg_0, _Per_CPU_Information@ha
    addi    \reg_0, \reg_0, _Per_CPU_Information@l
#endif
    mtspr   PPC_PER_CPU_CONTROL_REGISTER, \reg_0
#endif
.endm

/* Loads the address of this processor's per-CPU control into \reg */
.macro GET_SELF_CPU_CONTROL reg
#if defined(RTEMS_SMP)
    mfspr   \reg, PPC_PER_CPU_CONTROL_REGISTER
#else
    lis \reg, _Per_CPU_Information@h
    ori \reg, \reg, _Per_CPU_Information@l
#endif
.endm

/* Logical shift right by the immediate \imm, using the doubleword form on powerpc64 */
.macro SHIFT_RIGHT_IMMEDIATE rd, rs, imm
#if defined(__powerpc64__)
    srdi    \rd, \rs, \imm
#else
    srwi    \rd, \rs, \imm
#endif
.endm

/* Unsigned compare of \ra and \rb into condition register field \cr */
.macro COMPARE_LOGICAL cr, ra, rb
#if defined(__powerpc64__)
    cmpld   \cr, \ra, \rb
#else
    cmplw   \cr, \ra, \rb
#endif
.endm

/* Clears the low \imm bits of \rs into \rd (e.g. for alignment) */
.macro CLEAR_RIGHT_IMMEDIATE rd, rs, imm
#if defined(__powerpc64__)
    clrrdi  \rd, \rs, \imm
#else
    clrrwi  \rd, \rs, \imm
#endif
.endm

/* Assembler variant of LINKER_SYMBOL: only declares sym as external */
#define LINKER_SYMBOL(sym) .extern sym
1086 
1087 #endif /* ASM */
1088 
1089 #ifdef __cplusplus
1090 }
1091 #endif
1092 
1093 /** @} */
1094 
1095 #endif /* __LIBCPU_POWERPC_UTILITY_H */