/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief AArch64 architecture support implementation.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/cpuimpl.h>
#include <rtems/score/thread.h>
#include <rtems/score/tls.h>

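/*
 * The static assertions below pin the C structure layouts to the offset and
 * size constants shared with the AArch64 assembly code (context switch and
 * exception handling).  Any drift between the structures and the constants
 * is caught at compile time instead of corrupting state at run time.
 */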
#ifdef AARCH64_MULTILIB_VFP
  RTEMS_STATIC_ASSERT(
    offsetof( Context_Control, register_d8 )
      == AARCH64_CONTEXT_CONTROL_D8_OFFSET,
    AARCH64_CONTEXT_CONTROL_D8_OFFSET
  );
#endif

RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, thread_id )
    == AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET,
  AARCH64_CONTEXT_CONTROL_THREAD_ID_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( Context_Control, isr_dispatch_disable )
    == AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE,
  AARCH64_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE
);

#ifdef RTEMS_SMP
  RTEMS_STATIC_ASSERT(
    offsetof( Context_Control, is_executing )
      == AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET,
    AARCH64_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
  );
#endif

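/*
 * The CPU_Exception_frame is built on the stack by the exception vectors,
 * so its size must be a multiple of CPU_STACK_ALIGNMENT to keep the stack
 * pointer correctly aligned.  The register offsets below are the ones the
 * exception-handling assembly uses to access the saved registers.
 */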
RTEMS_STATIC_ASSERT(
  sizeof( CPU_Exception_frame ) == AARCH64_EXCEPTION_FRAME_SIZE,
  AARCH64_EXCEPTION_FRAME_SIZE
);

RTEMS_STATIC_ASSERT(
  sizeof( CPU_Exception_frame ) % CPU_STACK_ALIGNMENT == 0,
  CPU_Exception_frame_alignment
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_sp )
    == AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_lr )
    == AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_daif )
    == AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_syndrome )
    == AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, vector )
    == AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_fpsr )
    == AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET
);

RTEMS_STATIC_ASSERT(
  offsetof( CPU_Exception_frame, register_q0 )
    == AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET,
  AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET
);

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint64_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
)
{
  (void) new_level;
  (void) is_fp;

  /* The stack grows downward, so start at the top of the stack area */
  the_context->register_sp = (uintptr_t) stack_area_begin + stack_area_size;

  /* The first context switch "returns" to the entry point via LR */
  the_context->register_lr = (uintptr_t) entry_point;

  the_context->isr_dispatch_disable = 0;

  /* Without a TLS area the thread pointer is simply NULL */
  the_context->thread_id = (uintptr_t) tls_area;

  if ( tls_area != NULL ) {
    the_context->thread_id = (uintptr_t) _TLS_Initialize_area( tls_area );
  }
}
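
/*
 * Illustrative sketch only (thread_entry and stack are hypothetical names):
 * a typical invocation sets up a context whose first dispatch starts at the
 * entry point with the stack pointer at the top of the stack area.
 *
 *   static char stack[ 16 * 1024 ] RTEMS_ALIGNED( CPU_STACK_ALIGNMENT );
 *
 *   static void thread_entry( void ) { ... }
 *
 *   Context_Control context;
 *
 *   _CPU_Context_Initialize(
 *     &context,
 *     stack,            // stack_area_begin
 *     sizeof( stack ),  // stack_area_size
 *     0,                // new_level (unused by this port)
 *     thread_entry,     // initial program counter, reached via register_lr
 *     false,            // is_fp (unused by this port)
 *     NULL              // tls_area: no thread-local storage
 *   );
 */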

#if !defined(RTEMS_PARAVIRT)
void _CPU_ISR_Set_level( uint32_t level )
{
  /*
   * A nonzero level masks interrupts by setting the DAIF I bit.  The
   * "memory" clobber keeps the compiler from moving memory accesses
   * across the interrupt-state change.
   */
  if ( level ) {
    __asm__ volatile (
      "msr DAIFSet, #0x2\n"
      : : : "memory"
    );
  } else {
    __asm__ volatile (
      "msr DAIFClr, #0x2\n"
      : : : "memory"
    );
  }
}

uint32_t _CPU_ISR_Get_level( void )
{
  uint64_t level;

  __asm__ volatile (
    "mrs %[level], DAIF\n"
    : [level] "=&r" (level)
  );

  /* Report 1 if IRQs are masked (disabled), otherwise 0 */
  return ( level & AARCH64_PSTATE_I ) != 0;
}
#endif /* RTEMS_PARAVIRT */
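
/*
 * Illustrative sketch only: saving, masking, and restoring the interrupt
 * level around a critical region.  Real RTEMS code would normally use the
 * _ISR_Local_disable()/_ISR_Local_enable() macros rather than calling these
 * functions directly.
 *
 *   uint32_t level = _CPU_ISR_Get_level();
 *
 *   _CPU_ISR_Set_level( 1 );     // mask IRQs (DAIF I bit set)
 *   ...critical region...
 *   _CPU_ISR_Set_level( level ); // restore the previous state
 */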

void _CPU_Initialize( void )
{
  /* Do nothing */
}