瀏覽代碼

[libcpu][cm33] support cortex-m33

tangyuxin 5 年之前
父節點
當前提交
a16f27d84e

+ 23 - 0
libcpu/arm/cortex-m33/SConscript

@@ -0,0 +1,23 @@
+# RT-Thread building script for component
+
+from building import *
+
+Import('rtconfig')
+
+cwd     = GetCurrentDir()
+src     = Glob('*.c') + Glob('*.cpp')   # portable C/C++ sources are always built
+CPPPATH = [cwd]
+
+if rtconfig.PLATFORM == 'armcc':
+    src += Glob('*_rvds.S')             # ARM Compiler (armasm) flavour of the context-switch assembly
+
+if rtconfig.PLATFORM == 'gcc':
+    src += Glob('*_init.S')             # GNU startup code, if present
+    src += Glob('*_gcc.S')              # GAS flavour of the context-switch assembly
+
+if rtconfig.PLATFORM == 'iar':
+    src += Glob('*_iar.S')              # IAR assembler flavour of the context-switch assembly
+
+group = DefineGroup('cpu', src, depend = [''], CPPPATH = CPPPATH)   # depend=[''] => always enabled
+
+Return('group')

+ 293 - 0
libcpu/arm/cortex-m33/context_gcc.S

@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2006-2018, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2009-10-11     Bernard      first version
+ * 2012-01-01     aozima       support context switch load/store FPU register.
+ * 2013-06-18     aozima       add restore MSP feature.
+ * 2013-06-23     aozima       support lazy stack optimized.
+ * 2018-07-24     aozima       enhancement hard fault exception handler.
+ */
+
+/**
+ * @addtogroup cortex-m33
+ */
+/*@{*/
+
+.cpu cortex-m33         /* fixed: was cortex-m4 — PSPLIM (MRS/MSR below) exists only on ARMv8-M */
+.syntax unified
+.thumb
+.text
+
+.equ    SCB_VTOR,           0xE000ED08              /* Vector Table Offset Register */
+.equ    NVIC_INT_CTRL,      0xE000ED04              /* interrupt control state register */
+.equ    NVIC_SYSPRI2,       0xE000ED20              /* system priority register (2) */
+.equ    NVIC_PENDSV_PRI,    0x00FF0000              /* PendSV priority value (lowest) */
+.equ    NVIC_PENDSVSET,     0x10000000              /* value to trigger PendSV exception */
+
+/*
+ * rt_base_t rt_hw_interrupt_disable();
+ * Returns the previous PRIMASK value so the caller can restore it later.
+ */
+.global rt_hw_interrupt_disable
+.type rt_hw_interrupt_disable, %function
+rt_hw_interrupt_disable:
+    MRS     r0, PRIMASK                             /* r0 = current PRIMASK (returned interrupt state) */
+    CPSID   I                                       /* mask all configurable-priority exceptions */
+    BX      LR
+
+/*
+ * void rt_hw_interrupt_enable(rt_base_t level);
+ * Restores the PRIMASK value previously returned by rt_hw_interrupt_disable().
+ */
+.global rt_hw_interrupt_enable
+.type rt_hw_interrupt_enable, %function
+rt_hw_interrupt_enable:
+    MSR     PRIMASK, r0                             /* write saved state back; does NOT unconditionally enable */
+    BX      LR
+
+/*
+ * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
+ * r0 --> from
+ * r1 --> to
+ * Records the from/to SP-slot addresses and pends PendSV; the actual
+ * register save/restore happens later in PendSV_Handler.
+ */
+.global rt_hw_context_switch_interrupt
+.type rt_hw_context_switch_interrupt, %function
+.global rt_hw_context_switch
+.type rt_hw_context_switch, %function
+
+rt_hw_context_switch_interrupt:
+rt_hw_context_switch:
+    /* set rt_thread_switch_interrupt_flag to 1 */
+    LDR     r2, =rt_thread_switch_interrupt_flag
+    LDR     r3, [r2]
+    CMP     r3, #1
+    BEQ     _reswitch                               /* switch already pending: keep original 'from', only update 'to' */
+    MOV     r3, #1
+    STR     r3, [r2]
+
+    LDR     r2, =rt_interrupt_from_thread   /* set rt_interrupt_from_thread */
+    STR     r0, [r2]
+
+_reswitch:
+    LDR     r2, =rt_interrupt_to_thread     /* set rt_interrupt_to_thread */
+    STR     r1, [r2]
+
+    LDR r0, =NVIC_INT_CTRL              /* trigger the PendSV exception (causes context switch) */
+    LDR r1, =NVIC_PENDSVSET
+    STR r1, [r0]
+    BX  LR
+
+/* r0 --> switch from thread stack
+ * r1 --> switch to thread stack
+ * psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
+ *
+ * Performs the deferred context switch requested by rt_hw_context_switch():
+ * saves the outgoing thread's r4-r11 (+FPU/TrustZone state) on its stack,
+ * then restores the incoming thread's state and returns to it via EXC_RETURN.
+ */
+.global PendSV_Handler
+.type PendSV_Handler, %function
+PendSV_Handler:
+    /* disable interrupt to protect context switch */
+    MRS     r2, PRIMASK
+    CPSID   I
+
+    /* get rt_thread_switch_interrupt_flag */
+    LDR     r0, =rt_thread_switch_interrupt_flag    /* r0 = &rt_thread_switch_interrupt_flag */
+    LDR     r1, [r0]                                /* r1 = *r0 */
+    CMP     r1, #0x00                               /* compare r1 == 0x00 */
+    BNE     schedule
+    MSR     PRIMASK, r2                             /* no switch pending: restore interrupt state */
+    BX      lr                                      /* and return straight to the interrupted code */
+
+schedule:                                           /* fixed: GNU as requires ':' — bare 'schedule' is parsed as an opcode */
+    PUSH    {r2}                                    /* store interrupt state */
+
+    /* clear rt_thread_switch_interrupt_flag to 0 */
+    MOV     r1, #0x00                               /* r1 = 0x00 */
+    STR     r1, [r0]                                /* *r0 = r1 */
+
+    /* skip register save at the first time */
+    LDR     r0, =rt_interrupt_from_thread           /* r0 = &rt_interrupt_from_thread */
+    LDR     r1, [r0]                                /* r1 = *r0 */
+    CBZ     r1, switch_to_thread                    /* if r1 == 0, goto switch_to_thread */
+
+    /* Whether TrustZone thread stack exists */
+    LDR     r1,  =rt_trustzone_current_context      /* r1 = &rt_trustzone_current_context */
+    LDR     r1, [r1]                                /* r1 = *r1 */
+    CBZ     r1, contex_ns_store                     /* if r1 == 0, goto contex_ns_store */
+
+    /* call TrustZone fun, Save TrustZone stack */
+    STMFD   sp!, {r0-r1, lr}                        /* push register */
+    MOV     r0, r1                                  /* r0 = rt_trustzone_current_context */
+    BL      rt_trustzone_context_store              /* call TrustZone store fun */
+    LDMFD   sp!, {r0-r1, lr}                        /* pop register */
+
+    /* check break from TrustZone */
+    MOV     r2, lr                                  /* r2 = lr */
+    TST     r2, #0x40                               /* if EXC_RETURN[6] is 1, TrustZone stack was used */
+    BEQ     contex_ns_store                         /* if r2 & 0x40 == 0, goto contex_ns_store */
+
+    /* push PSPLIM CONTROL PSP LR current_context to stack */
+    MRS     r3, psplim                              /* r3 = psplim */
+    MRS     r4, control                             /* r4 = control */
+    MRS     r5, psp                                 /* r5 = psp */
+    STMFD   r5!, {r1-r4}                            /* push to thread stack */
+
+    /* update from thread stack pointer */
+    LDR     r0, [r0]                                /* r0 = rt_interrupt_from_thread (fixed comment: not the switch flag) */
+    STR     r5, [r0]                                /* *r0 = r5 */
+    B       switch_to_thread                        /* goto switch_to_thread */
+
+contex_ns_store:
+
+    MRS     r1, psp                                 /* get from thread stack pointer */
+
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+    TST     lr, #0x10                               /* if(!EXC_RETURN[4]) */
+    IT      EQ                                      /* unified syntax: conditional VSTMDB must be inside an IT block */
+    VSTMDBEQ  r1!, {d8 - d15}                       /* push FPU register s16~s31 */
+#endif
+
+    STMFD   r1!, {r4 - r11}                         /* push r4 - r11 register */
+
+    LDR     r2,  =rt_trustzone_current_context      /* r2 = &rt_trustzone_current_context */
+    LDR     r2, [r2]                                /* r2 = *r2 */
+    MOV     r3, lr                                  /* r3 = lr */
+    MRS     r4, psplim                              /* r4 = psplim */
+    MRS     r5, control                             /* r5 = control */
+    STMFD   r1!, {r2-r5}                            /* push to thread stack */
+
+    LDR     r0, [r0]                                /* r0 = rt_interrupt_from_thread */
+    STR     r1, [r0]                                /* update from thread stack pointer */
+
+switch_to_thread:
+    LDR     r1, =rt_interrupt_to_thread
+    LDR     r1, [r1]
+    LDR     r1, [r1]                                /* load thread stack pointer */
+
+    /* update current TrustZone context */
+    LDMFD   r1!, {r2-r5}                            /* pop thread stack */
+    MSR     psplim, r4                              /* psplim = r4 */
+    MSR     control, r5                             /* control = r5 */
+    MOV     lr, r3                                  /* lr = r3 */
+    LDR     r6,  =rt_trustzone_current_context      /* r6 = &rt_trustzone_current_context */
+    STR     r2, [r6]                                /* *r6 = r2 */
+    MOV     r0, r2                                  /* r0 = r2 */
+
+    /* Whether TrustZone thread stack exists */
+    CBZ     r0, contex_ns_load                      /* if r0 == 0, goto contex_ns_load */
+    PUSH    {r1, r3}                                /* push thread_stack, lr */
+    BL      rt_trustzone_context_load               /* call TrustZone load fun */
+    POP     {r1, r3}                                /* pop thread_stack, lr */
+    MOV     lr, r3                                  /* lr = r3 (fixed comment: was 'lr = r1') */
+    TST     r3, #0x40                               /* if EXC_RETURN[6] is 1, TrustZone stack was used */
+    BEQ     contex_ns_load                          /* if r3 & 0x40 == 0, goto contex_ns_load */
+    B       pendsv_exit
+
+contex_ns_load:
+    LDMFD   r1!, {r4 - r11}                         /* pop r4 - r11 register */
+
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+    TST     lr, #0x10                               /* if(!EXC_RETURN[4]) */
+    IT      EQ                                      /* unified syntax: conditional VLDMIA must be inside an IT block */
+    VLDMIAEQ  r1!, {d8 - d15}                       /* pop FPU register s16~s31 */
+#endif
+
+pendsv_exit:
+    MSR     psp, r1                                 /* update stack pointer */
+    /* restore interrupt */
+    POP    {r2}
+    MSR     PRIMASK, r2
+
+    BX      lr                                      /* exception return via EXC_RETURN in lr */
+
+/*
+ * void rt_hw_context_switch_to(rt_uint32 to);
+ * r0 --> to
+ * Starts the very first thread: from-thread is forced to 0 so PendSV skips
+ * the register-save path, MSP is reset from the vector table, and the switch
+ * takes effect once interrupts are enabled and the pended PendSV runs.
+ */
+.global rt_hw_context_switch_to
+.type rt_hw_context_switch_to, %function
+rt_hw_context_switch_to:
+    LDR r1, =rt_interrupt_to_thread                 /* record the first thread's SP-slot address */
+    STR r0, [r1]
+
+#if defined (__VFP_FP__) && !defined(__SOFTFP__)
+    /* CLEAR CONTROL.FPCA */
+    MRS     r2, CONTROL         /* read */
+    BIC     r2, #0x04           /* modify */
+    MSR     CONTROL, r2         /* write-back */
+#endif
+
+    /* set from thread to 0 */
+    LDR r1, =rt_interrupt_from_thread
+    MOV r0, #0x0
+    STR r0, [r1]
+
+    /* set interrupt flag to 1 */
+    LDR     r1, =rt_thread_switch_interrupt_flag
+    MOV     r0, #1
+    STR     r0, [r1]
+
+    /* set the PendSV exception priority */
+    LDR r0, =NVIC_SYSPRI2
+    LDR r1, =NVIC_PENDSV_PRI
+    LDR.W   r2, [r0,#0x00]       /* read       */
+    ORR     r1,r1,r2             /* modify     */
+    STR     r1, [r0]             /* write-back */
+
+    LDR r0, =NVIC_INT_CTRL      /* trigger the PendSV exception (causes context switch) */
+    LDR r1, =NVIC_PENDSVSET
+    STR r1, [r0]
+
+    /* restore MSP */
+    LDR     r0, =SCB_VTOR
+    LDR     r0, [r0]
+    LDR     r0, [r0]            /* first vector-table entry = initial MSP value */
+    NOP                         /* NOTE(review): purpose unclear — presumably a settle delay; verify */
+    MSR     msp, r0
+
+    /* enable interrupts at processor level */
+    CPSIE   F
+    CPSIE   I                   /* pended PendSV fires here and starts the first thread */
+
+    /* never reach here! */
+
+/* compatible with old version */
+.global rt_hw_interrupt_thread_switch
+.type rt_hw_interrupt_thread_switch, %function
+rt_hw_interrupt_thread_switch:
+    BX  lr                      /* deliberate no-op, kept only for backward API compatibility */
+    NOP
+
+.global HardFault_Handler
+.type HardFault_Handler, %function
+HardFault_Handler:
+    /* Capture the faulting context (same frame layout as a PendSV save)
+     * and hand it to the C handler rt_hw_hard_fault_exception. */
+    /* get current context */
+    MRS     r0, msp                                 /* get fault context from handler. */
+    TST     lr, #0x04                               /* if(!EXC_RETURN[2]) */
+    BEQ     get_sp_done
+    MRS     r0, psp                                 /* get fault context from thread. */
+get_sp_done:
+
+    STMFD   r0!, {r4 - r11}                         /* push r4 - r11 register */
+
+    LDR     r2,  =rt_trustzone_current_context      /* r2 = &rt_trustzone_current_context */
+    LDR     r2, [r2]                                /* r2 = *r2 */
+    MOV     r3, lr                                  /* r3 = lr */
+    MRS     r4, psplim                              /* r4 = psplim */
+    MRS     r5, control                             /* r5 = control */
+    STMFD   r0!, {r2-r5}                            /* push to thread stack */
+
+    STMFD   r0!, {lr}                               /* push exec_return register */
+
+    TST     lr, #0x04                               /* if(!EXC_RETURN[2]) */
+    BEQ     update_msp
+    MSR     psp, r0                                 /* update stack pointer to PSP. */
+    B       update_done
+update_msp:
+    MSR     msp, r0                                 /* update stack pointer to MSP. */
+update_done:
+
+    PUSH    {LR}
+    BL      rt_hw_hard_fault_exception              /* C handler receives r0 = pointer to saved frame */
+    POP     {LR}
+
+    ORR     lr, lr, #0x04                           /* NOTE(review): forces PSP return even for MSP faults — confirm intended */
+    BX      lr

+ 300 - 0
libcpu/arm/cortex-m33/context_iar.S

@@ -0,0 +1,300 @@
+;/*
+; * Copyright (c) 2006-2018, RT-Thread Development Team
+; *
+; * SPDX-License-Identifier: Apache-2.0
+; *
+; * Change Logs:
+; * Date           Author       Notes
+; * 2009-01-17     Bernard      first version
+; * 2009-09-27     Bernard      add protect when contex switch occurs
+; * 2012-01-01     aozima       support context switch load/store FPU register.
+; * 2013-06-18     aozima       add restore MSP feature.
+; * 2013-06-23     aozima       support lazy stack optimized.
+; * 2018-07-24     aozima       enhancement hard fault exception handler.
+; */
+
+;/**
+; * @addtogroup cortex-m33
+; */
+;/*@{*/
+
+SCB_VTOR        EQU     0xE000ED08               ; Vector Table Offset Register
+NVIC_INT_CTRL   EQU     0xE000ED04               ; interrupt control state register
+NVIC_SYSPRI2    EQU     0xE000ED20               ; system priority register (2)
+NVIC_PENDSV_PRI EQU     0x00FF0000               ; PendSV priority value (lowest)
+NVIC_PENDSVSET  EQU     0x10000000               ; value to trigger PendSV exception
+
+    SECTION    .text:CODE(2)
+    THUMB
+    REQUIRE8
+    PRESERVE8
+
+    IMPORT rt_thread_switch_interrupt_flag
+    IMPORT rt_interrupt_from_thread
+    IMPORT rt_interrupt_to_thread
+    IMPORT rt_trustzone_current_context
+    IMPORT rt_trustzone_context_load
+    IMPORT rt_trustzone_context_store
+
+;/*
+; * rt_base_t rt_hw_interrupt_disable();
+; * Returns the previous PRIMASK value so the caller can restore it later.
+; */
+    EXPORT rt_hw_interrupt_disable
+rt_hw_interrupt_disable:
+    MRS     r0, PRIMASK         ; r0 = current PRIMASK (returned interrupt state)
+    CPSID   I                   ; mask all configurable-priority exceptions
+    BX      LR
+
+;/*
+; * void rt_hw_interrupt_enable(rt_base_t level);
+; * Restores the PRIMASK value returned by rt_hw_interrupt_disable().
+; */
+    EXPORT  rt_hw_interrupt_enable
+rt_hw_interrupt_enable:
+    MSR     PRIMASK, r0         ; write saved state back; does NOT unconditionally enable
+    BX      LR
+
+;/*
+; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
+; * r0 --> from
+; * r1 --> to
+; * Records the from/to SP-slot addresses and pends PendSV; the actual
+; * register save/restore happens later in PendSV_Handler.
+; */
+    EXPORT rt_hw_context_switch_interrupt
+    EXPORT rt_hw_context_switch
+rt_hw_context_switch_interrupt:
+rt_hw_context_switch:
+    ; set rt_thread_switch_interrupt_flag to 1
+    LDR     r2, =rt_thread_switch_interrupt_flag
+    LDR     r3, [r2]
+    CMP     r3, #1
+    BEQ     _reswitch                       ; switch already pending: keep original 'from', only update 'to'
+    MOV     r3, #1
+    STR     r3, [r2]
+
+    LDR     r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
+    STR     r0, [r2]
+
+_reswitch
+    LDR     r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
+    STR     r1, [r2]
+
+    LDR     r0, =NVIC_INT_CTRL              ; trigger the PendSV exception (causes context switch)
+    LDR     r1, =NVIC_PENDSVSET
+    STR     r1, [r0]
+    BX      LR
+
+; r0 --> switch from thread stack
+; r1 --> switch to thread stack
+; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
+; Performs the deferred context switch: saves the outgoing thread's state,
+; restores the incoming thread's state and returns to it via EXC_RETURN.
+    EXPORT PendSV_Handler
+PendSV_Handler:
+
+    ; disable interrupt to protect context switch
+    MRS     r2, PRIMASK
+    CPSID   I
+
+    ; get rt_thread_switch_interrupt_flag
+    LDR     r0, =rt_thread_switch_interrupt_flag    ; r0 = &rt_thread_switch_interrupt_flag
+    LDR     r1, [r0]                                ; r1 = *r0 (fixed comment: was 'r1 = *r1')
+    CMP     r1, #0x00                               ; compare r1 == 0x00
+    BNE     schedule
+    MSR     PRIMASK, r2                             ; no switch pending: restore interrupt state
+    BX      lr                                      ; and return straight to the interrupted code
+
+schedule
+    PUSH    {r2}                                    ; store interrupt state
+
+    ; clear rt_thread_switch_interrupt_flag to 0
+    MOV     r1, #0x00                               ; r1 = 0x00
+    STR     r1, [r0]                                ; *r0 = r1
+
+    ; skip register save at the first time
+    LDR     r0, =rt_interrupt_from_thread           ; r0 = &rt_interrupt_from_thread
+    LDR     r1, [r0]                                ; r1 = *r0
+    CBZ     r1, switch_to_thread                    ; if r1 == 0, goto switch_to_thread
+
+    ; Whether TrustZone thread stack exists
+    LDR     r1,  =rt_trustzone_current_context      ; r1 = &rt_trustzone_current_context
+    LDR     r1, [r1]                                ; r1 = *r1
+    CBZ     r1, contex_ns_store                     ; if r1 == 0, goto contex_ns_store
+
+    ;call TrustZone fun, Save TrustZone stack
+    STMFD   sp!, {r0-r1, lr}                        ; push register
+    MOV     r0, r1                                  ; r0 = rt_trustzone_current_context
+    BL rt_trustzone_context_store                   ; call TrustZone store fun
+    LDMFD   sp!, {r0-r1, lr}                        ; pop register
+
+    ; check break from TrustZone
+    MOV     r2, lr                                  ; r2 = lr
+    TST     r2, #0x40                               ; if EXC_RETURN[6] is 1, TrustZone stack was used
+    BEQ     contex_ns_store                         ; if r2 & 0x40 == 0, goto contex_ns_store
+
+    ; push PSPLIM CONTROL PSP LR current_context to stack
+    MRS     r3, psplim                              ; r3 = psplim
+    MRS     r4, control                             ; r4 = control
+    MRS     r5, psp                                 ; r5 = psp
+    STMFD   r5!, {r1-r4}                            ; push to thread stack
+
+    ; update from thread stack pointer
+    LDR     r0, [r0]                                ; r0 = rt_interrupt_from_thread (fixed comment: not the switch flag)
+    STR     r5, [r0]                                ; *r0 = r5
+    b switch_to_thread                              ; goto switch_to_thread
+
+contex_ns_store
+
+    MRS     r1, psp                                 ; get from thread stack pointer
+
+#if defined ( __ARMVFP__ )
+    TST     lr, #0x10                               ; if(!EXC_RETURN[4])
+    BNE     skip_push_fpu
+    VSTMDB  r1!, {d8 - d15}                         ; push FPU register s16~s31
+skip_push_fpu
+#endif
+
+    STMFD   r1!, {r4 - r11}         ; push r4 - r11 register
+
+    LDR     r2,  =rt_trustzone_current_context      ; r2 = &rt_trustzone_current_context
+    LDR     r2, [r2]                                ; r2 = *r2
+    MOV     r3, lr                                  ; r3 = lr
+    MRS     r4, psplim                              ; r4 = psplim
+    MRS     r5, control                             ; r5 = control
+    STMFD   r1!, {r2-r5}                            ; push to thread stack
+
+    LDR     r0, [r0]                                ; r0 = rt_interrupt_from_thread
+    STR     r1, [r0]                                ; update from thread stack pointer
+
+switch_to_thread
+
+    LDR     r1, =rt_interrupt_to_thread
+    LDR     r1, [r1]
+    LDR     r1, [r1]                ; load thread stack pointer
+
+    ; update current TrustZone context
+    LDMFD   r1!, {r2-r5}                            ; pop thread stack
+    MSR     psplim, r4                              ; psplim = r4
+    MSR     control, r5                             ; control = r5
+    MOV     lr, r3                                  ; lr = r3
+    LDR     r6,  =rt_trustzone_current_context      ; r6 = &rt_trustzone_current_context
+    STR     r2, [r6]                                ; *r6 = r2
+    MOV     r0, r2                                  ; r0 = r2
+
+    ; Whether TrustZone thread stack exists
+    CBZ     r0, contex_ns_load                      ; if r0 == 0, goto contex_ns_load
+    PUSH    {r1, r3}                                ; push thread_stack, lr
+    BL rt_trustzone_context_load                    ; call TrustZone load fun
+    POP     {r1, r3}                                ; pop thread_stack, lr
+    MOV     lr, r3                                  ; lr = r3 (fixed comment: was 'lr = r1')
+    TST     r3, #0x40                               ; if EXC_RETURN[6] is 1, TrustZone stack was used
+    BEQ     contex_ns_load                          ; if r3 & 0x40 == 0, goto contex_ns_load
+    B pendsv_exit
+
+contex_ns_load
+    LDMFD   r1!, {r4 - r11}                         ; pop r4 - r11 register
+
+#if defined ( __ARMVFP__ )
+    TST     lr, #0x10                               ; if(!EXC_RETURN[4])
+    BNE     skip_pop_fpu
+    VLDMIA  r1!, {d8 - d15}                         ; pop FPU register s16~s31
+skip_pop_fpu
+#endif
+
+pendsv_exit
+    MSR     psp, r1                                 ; update stack pointer
+    ; restore interrupt
+    POP    {r2}
+    MSR     PRIMASK, r2
+
+    BX      lr                                      ; exception return via EXC_RETURN in lr
+
+;/*
+; * void rt_hw_context_switch_to(rt_uint32 to);
+; * r0 --> to
+; * Starts the very first thread: from-thread is forced to 0 so PendSV skips
+; * the save path; MSP is reset from the vector table; the switch takes
+; * effect once interrupts are enabled and the pended PendSV runs.
+; */
+    EXPORT rt_hw_context_switch_to
+rt_hw_context_switch_to:
+    LDR     r1, =rt_interrupt_to_thread     ; record the first thread's SP-slot address
+    STR     r0, [r1]
+
+#if defined ( __ARMVFP__ )
+    ; CLEAR CONTROL.FPCA
+    MRS     r2, CONTROL             ; read
+    BIC     r2, r2, #0x04           ; modify
+    MSR     CONTROL, r2             ; write-back
+#endif
+
+    ; set from thread to 0
+    LDR     r1, =rt_interrupt_from_thread
+    MOV     r0, #0x0
+    STR     r0, [r1]
+
+    ; set interrupt flag to 1
+    LDR     r1, =rt_thread_switch_interrupt_flag
+    MOV     r0, #1
+    STR     r0, [r1]
+
+    ; set the PendSV exception priority
+    LDR     r0, =NVIC_SYSPRI2
+    LDR     r1, =NVIC_PENDSV_PRI
+    LDR.W   r2, [r0,#0x00]       ; read
+    ORR     r1,r1,r2             ; modify
+    STR     r1, [r0]             ; write-back
+
+    LDR     r0, =NVIC_INT_CTRL      ; trigger the PendSV exception (causes context switch)
+    LDR     r1, =NVIC_PENDSVSET
+    STR     r1, [r0]
+
+    ; restore MSP
+    LDR     r0, =SCB_VTOR
+    LDR     r0, [r0]
+    LDR     r0, [r0]            ; first vector-table entry = initial MSP value
+    NOP                         ; NOTE(review): purpose unclear - presumably a settle delay; verify
+    MSR     msp, r0
+
+    ; enable interrupts at processor level
+    CPSIE   F
+    CPSIE   I                   ; pended PendSV fires here and starts the first thread
+
+    ; never reach here!
+
+; compatible with old version
+    EXPORT rt_hw_interrupt_thread_switch
+rt_hw_interrupt_thread_switch:
+    BX      lr                  ; deliberate no-op, kept only for backward API compatibility
+
+    IMPORT rt_hw_hard_fault_exception
+    EXPORT HardFault_Handler
+HardFault_Handler:
+
+    ; Capture the faulting context (same frame layout as a PendSV save)
+    ; and hand it to the C handler rt_hw_hard_fault_exception.
+    ; get current context
+    MRS     r0, msp                                 ; get fault context from handler.
+    TST     lr, #0x04                               ; if(!EXC_RETURN[2])
+    BEQ     get_sp_done
+    MRS     r0, psp                                 ; get fault context from thread.
+get_sp_done
+
+    STMFD   r0!, {r4 - r11}                         ; push r4 - r11 register
+
+    LDR     r2,  =rt_trustzone_current_context      ; r2 = &rt_trustzone_current_context
+    LDR     r2, [r2]                                ; r2 = *r2
+    MOV     r3, lr                                  ; r3 = lr
+    MRS     r4, psplim                              ; r4 = psplim
+    MRS     r5, control                             ; r5 = control
+    STMFD   r0!, {r2-r5}                            ; push to thread stack
+
+    STMFD   r0!, {lr}                               ; push exec_return register
+
+    TST     lr, #0x04                               ; if(!EXC_RETURN[2])
+    BEQ     update_msp
+    MSR     psp, r0                                 ; update stack pointer to PSP.
+    B       update_done
+update_msp
+    MSR     msp, r0                                 ; update stack pointer to MSP.
+update_done
+
+    PUSH    {lr}
+    BL      rt_hw_hard_fault_exception              ; C handler receives r0 = pointer to saved frame
+    POP     {lr}
+
+    ORR     lr, lr, #0x04                           ; NOTE(review): forces PSP return even for MSP faults - confirm intended
+    BX      lr
+
+    END

+ 306 - 0
libcpu/arm/cortex-m33/context_rvds.S

@@ -0,0 +1,306 @@
+;/*
+;* Copyright (c) 2006-2018, RT-Thread Development Team
+;*
+;* SPDX-License-Identifier: Apache-2.0
+;*
+; * Change Logs:
+; * Date           Author       Notes
+; * 2009-01-17     Bernard      first version.
+; * 2012-01-01     aozima       support context switch load/store FPU register.
+; * 2013-06-18     aozima       add restore MSP feature.
+; * 2013-06-23     aozima       support lazy stack optimized.
+; * 2018-07-24     aozima       enhancement hard fault exception handler.
+; */
+
+;/**
+; * @addtogroup cortex-m33
+; */
+;/*@{*/
+
+SCB_VTOR        EQU     0xE000ED08               ; Vector Table Offset Register
+NVIC_INT_CTRL   EQU     0xE000ED04               ; interrupt control state register
+NVIC_SYSPRI2    EQU     0xE000ED20               ; system priority register (2)
+NVIC_PENDSV_PRI EQU     0x00FF0000               ; PendSV priority value (lowest)
+NVIC_PENDSVSET  EQU     0x10000000               ; value to trigger PendSV exception
+
+    AREA |.text|, CODE, READONLY, ALIGN=2
+    THUMB
+    REQUIRE8
+    PRESERVE8
+
+    IMPORT rt_thread_switch_interrupt_flag
+    IMPORT rt_interrupt_from_thread
+    IMPORT rt_interrupt_to_thread
+    IMPORT rt_trustzone_current_context
+    IMPORT rt_trustzone_context_load
+    IMPORT rt_trustzone_context_store
+
+;/*
+; * rt_base_t rt_hw_interrupt_disable();
+; * Returns the previous PRIMASK value so the caller can restore it later.
+; */
+rt_hw_interrupt_disable    PROC
+    EXPORT  rt_hw_interrupt_disable
+    MRS     r0, PRIMASK         ; r0 = current PRIMASK (returned interrupt state)
+    CPSID   I                   ; mask all configurable-priority exceptions
+    BX      LR
+    ENDP
+
+;/*
+; * void rt_hw_interrupt_enable(rt_base_t level);
+; * Restores the PRIMASK value returned by rt_hw_interrupt_disable().
+; */
+rt_hw_interrupt_enable    PROC
+    EXPORT  rt_hw_interrupt_enable
+    MSR     PRIMASK, r0         ; write saved state back; does NOT unconditionally enable
+    BX      LR
+    ENDP
+
+;/*
+; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
+; * r0 --> from
+; * r1 --> to
+; * Records the from/to SP-slot addresses and pends PendSV; the actual
+; * register save/restore happens later in PendSV_Handler.
+; */
+rt_hw_context_switch_interrupt
+    EXPORT rt_hw_context_switch_interrupt
+rt_hw_context_switch    PROC
+    EXPORT rt_hw_context_switch
+
+    ; set rt_thread_switch_interrupt_flag to 1
+    LDR     r2, =rt_thread_switch_interrupt_flag
+    LDR     r3, [r2]
+    CMP     r3, #1
+    BEQ     _reswitch                       ; switch already pending: keep original 'from', only update 'to'
+    MOV     r3, #1
+    STR     r3, [r2]
+
+    LDR     r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
+    STR     r0, [r2]
+
+_reswitch
+    LDR     r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
+    STR     r1, [r2]
+
+    LDR     r0, =NVIC_INT_CTRL              ; trigger the PendSV exception (causes context switch)
+    LDR     r1, =NVIC_PENDSVSET
+    STR     r1, [r0]
+    BX      LR
+    ENDP
+
+; r0 --> switch from thread stack
+; r1 --> switch to thread stack
+; psr, pc, lr, r12, r3, r2, r1, r0 are pushed into [from] stack
+; Performs the deferred context switch: saves the outgoing thread's state,
+; restores the incoming thread's state and returns to it via EXC_RETURN.
+PendSV_Handler   PROC
+    EXPORT PendSV_Handler
+
+    ; disable interrupt to protect context switch
+    MRS     r2, PRIMASK                             ; R2 = PRIMASK
+    CPSID   I                                       ; disable all interrupt
+
+    ; get rt_thread_switch_interrupt_flag
+    LDR     r0, =rt_thread_switch_interrupt_flag    ; r0 = &rt_thread_switch_interrupt_flag
+    LDR     r1, [r0]                                ; r1 = *r0 (fixed comment: was 'r1 = *r1')
+    CMP     r1, #0x00                               ; compare r1 == 0x00
+    BNE     schedule
+    MSR     PRIMASK, r2                             ; no switch pending: restore interrupt state
+    BX      lr                                      ; and return straight to the interrupted code
+
+schedule
+    PUSH    {r2}                                    ; store interrupt state
+
+    ; clear rt_thread_switch_interrupt_flag to 0
+    MOV     r1, #0x00                               ; r1 = 0x00
+    STR     r1, [r0]                                ; *r0 = r1
+
+    ; skip register save at the first time
+    LDR     r0, =rt_interrupt_from_thread           ; r0 = &rt_interrupt_from_thread
+    LDR     r1, [r0]                                ; r1 = *r0
+    CBZ     r1, switch_to_thread                    ; if r1 == 0, goto switch_to_thread
+
+    ; Whether TrustZone thread stack exists
+    LDR     r1,  =rt_trustzone_current_context      ; r1 = &rt_trustzone_current_context
+    LDR     r1, [r1]                                ; r1 = *r1
+    CBZ     r1, contex_ns_store                     ; if r1 == 0, goto contex_ns_store
+
+    ;call TrustZone fun, Save TrustZone stack
+    STMFD   sp!, {r0-r1, lr}                        ; push register
+    MOV     r0, r1                                  ; r0 = rt_trustzone_current_context
+    BL rt_trustzone_context_store                   ; call TrustZone store fun
+    LDMFD   sp!, {r0-r1, lr}                        ; pop register
+
+    ; check break from TrustZone
+    MOV     r2, lr                                  ; r2 = lr
+    TST     r2, #0x40                               ; if EXC_RETURN[6] is 1, TrustZone stack was used
+    BEQ     contex_ns_store                         ; if r2 & 0x40 == 0, goto contex_ns_store
+
+    ; push PSPLIM CONTROL PSP LR current_context to stack
+    MRS     r3, psplim                              ; r3 = psplim
+    MRS     r4, control                             ; r4 = control
+    MRS     r5, psp                                 ; r5 = psp
+    STMFD   r5!, {r1-r4}                            ; push to thread stack
+
+    ; update from thread stack pointer
+    LDR     r0, [r0]                                ; r0 = rt_interrupt_from_thread (fixed comment: not the switch flag)
+    STR     r5, [r0]                                ; *r0 = r5
+    b switch_to_thread                              ; goto switch_to_thread
+
+contex_ns_store
+
+    MRS     r1, psp                                 ; get from thread stack pointer
+
+    IF      {FPU} != "SoftVFP"
+    TST     lr, #0x10                               ; if(!EXC_RETURN[4])
+    VSTMFDEQ  r1!, {d8 - d15}                       ; push FPU register s16~s31
+    ENDIF
+
+    STMFD   r1!, {r4 - r11}                         ; push r4 - r11 register
+
+    LDR     r2,  =rt_trustzone_current_context      ; r2 = &rt_trustzone_current_context
+    LDR     r2, [r2]                                ; r2 = *r2
+    MOV     r3, lr                                  ; r3 = lr
+    MRS     r4, psplim                              ; r4 = psplim
+    MRS     r5, control                             ; r5 = control
+    STMFD   r1!, {r2-r5}                            ; push to thread stack
+
+    LDR     r0, [r0]                                ; r0 = rt_interrupt_from_thread
+    STR     r1, [r0]                                ; update from thread stack pointer
+
+switch_to_thread
+    LDR     r1, =rt_interrupt_to_thread
+    LDR     r1, [r1]
+    LDR     r1, [r1]                                ; load thread stack pointer
+
+    ; update current TrustZone context
+    LDMFD   r1!, {r2-r5}                            ; pop thread stack
+    MSR     psplim, r4                              ; psplim = r4
+    MSR     control, r5                             ; control = r5
+    MOV     lr, r3                                  ; lr = r3
+    LDR     r6,  =rt_trustzone_current_context      ; r6 = &rt_trustzone_current_context
+    STR     r2, [r6]                                ; *r6 = r2
+    MOV     r0, r2                                  ; r0 = r2
+
+    ; Whether TrustZone thread stack exists
+    CBZ     r0, contex_ns_load                      ; if r0 == 0, goto contex_ns_load
+    PUSH    {r1, r3}                                ; push thread_stack, lr
+    BL rt_trustzone_context_load                    ; call TrustZone load fun
+    POP     {r1, r3}                                ; pop thread_stack, lr
+    MOV     lr, r3                                  ; lr = r3 (fixed comment: was 'lr = r1')
+    TST     r3, #0x40                               ; if EXC_RETURN[6] is 1, TrustZone stack was used
+    BEQ     contex_ns_load                          ; if r3 & 0x40 == 0, goto contex_ns_load
+    B pendsv_exit
+
+contex_ns_load
+    LDMFD   r1!, {r4 - r11}                         ; pop r4 - r11 register
+
+    IF      {FPU} != "SoftVFP"
+    TST     lr, #0x10                               ; if(!EXC_RETURN[4])
+    VLDMFDEQ  r1!, {d8 - d15}                       ; pop FPU register s16~s31
+    ENDIF
+
+pendsv_exit
+    MSR     psp, r1                                 ; update stack pointer
+    ; restore interrupt
+    POP    {r2}
+    MSR     PRIMASK, r2
+
+    BX      lr                                      ; exception return via EXC_RETURN in lr
+    ENDP
+
+;/*
+; * void rt_hw_context_switch_to(rt_uint32 to);
+; * r0 --> to
+; * this function is used to perform the first thread switch
+; */
+rt_hw_context_switch_to    PROC
+    EXPORT rt_hw_context_switch_to
+    ; set to thread
+    LDR     r1, =rt_interrupt_to_thread     ; record the first thread's SP-slot address
+    STR     r0, [r1]
+
+    IF      {FPU} != "SoftVFP"
+    ; CLEAR CONTROL.FPCA
+    MRS     r2, CONTROL             ; read
+    BIC     r2, #0x04               ; modify
+    MSR     CONTROL, r2             ; write-back
+    ENDIF
+
+    ; set from thread to 0
+    LDR     r1, =rt_interrupt_from_thread
+    MOV     r0, #0x0
+    STR     r0, [r1]
+
+    ; set interrupt flag to 1
+    LDR     r1, =rt_thread_switch_interrupt_flag
+    MOV     r0, #1
+    STR     r0, [r1]
+
+    ; set the PendSV exception priority
+    LDR     r0, =NVIC_SYSPRI2
+    LDR     r1, =NVIC_PENDSV_PRI
+    LDR.W   r2, [r0,#0x00]       ; read
+    ORR     r1,r1,r2             ; modify
+    STR     r1, [r0]             ; write-back
+
+    ; trigger the PendSV exception (causes context switch)
+    LDR     r0, =NVIC_INT_CTRL
+    LDR     r1, =NVIC_PENDSVSET
+    STR     r1, [r0]
+
+    ; restore MSP
+    LDR     r0, =SCB_VTOR
+    LDR     r0, [r0]
+    LDR     r0, [r0]            ; first vector-table entry = initial MSP value
+    MSR     msp, r0
+
+    ; enable interrupts at processor level
+    CPSIE   F
+    CPSIE   I                   ; pended PendSV fires here and starts the first thread
+
+    ; never reach here!
+    ENDP
+
+; compatible with old version
+rt_hw_interrupt_thread_switch PROC
+    EXPORT rt_hw_interrupt_thread_switch
+    BX      lr                  ; deliberate no-op, kept only for backward API compatibility
+    ENDP
+
+    IMPORT rt_hw_hard_fault_exception
+    EXPORT HardFault_Handler
+HardFault_Handler    PROC
+
+    ; Capture the faulting context (same frame layout as a PendSV save)
+    ; and hand it to the C handler rt_hw_hard_fault_exception.
+    ; get current context
+    MRS     r0, msp                 ;get fault context from handler
+    TST     lr, #0x04               ;if(!EXC_RETURN[2])
+    BEQ     get_sp_done
+    MRS     r0, psp                 ;get fault context from thread
+get_sp_done
+
+    STMFD   r0!, {r4 - r11}         ; push r4 - r11 register
+
+    LDR     r2,  =rt_trustzone_current_context      ; r2 = &rt_trustzone_current_context
+    LDR     r2, [r2]                                ; r2 = *r2
+    MOV     r3, lr                                  ; r3 = lr
+    MRS     r4, psplim                              ; r4 = psplim
+    MRS     r5, control                             ; r5 = control
+    STMFD   r0!, {r2-r5}                            ; push to thread stack
+
+    STMFD   r0!, {lr}               ; push exec_return register
+
+    TST     lr, #0x04               ; if(!EXC_RETURN[2])
+    BEQ     update_msp
+    MSR     psp, r0                 ; update stack pointer to PSP
+    B       update_done
+update_msp
+    MSR     msp, r0                 ; update stack pointer to MSP
+update_done
+
+    PUSH    {lr}
+    BL      rt_hw_hard_fault_exception              ; C handler receives r0 = pointer to saved frame
+    POP     {lr}
+
+    ORR     lr, lr, #0x04           ; NOTE(review): forces PSP return even for MSP faults - confirm intended
+    BX      lr
+    ENDP
+
+    ALIGN   4
+
+    END

+ 510 - 0
libcpu/arm/cortex-m33/cpuport.c

@@ -0,0 +1,510 @@
+/*
+ * Copyright (c) 2006-2018, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2011-10-21     Bernard      the first version.
+ * 2011-10-27     aozima       update for cortex-M4 FPU.
+ * 2011-12-31     aozima       fixed stack align issues.
+ * 2012-01-01     aozima       support context switch load/store FPU register.
+ * 2012-12-11     lgnq         fixed the coding style.
+ * 2012-12-23     aozima       stack addr align to 8byte.
+ * 2012-12-29     Bernard      Add exception hook.
+ * 2013-06-23     aozima       support lazy stack optimized.
+ * 2018-07-24     aozima       enhancement hard fault exception handler.
+ * 2019-07-03     yangjie      add __rt_ffs() for armclang.
+ */
+
+#include <rtthread.h>
+
+/* USE_FPU is 1 when the active toolchain compiles for a hardware FPU
+   (armcc, clang, IAR and GCC each expose a different predefined macro). */
+#if               /* ARMCC */ (  (defined ( __CC_ARM ) && defined ( __TARGET_FPU_VFP ))    \
+                  /* Clang */ || (defined ( __CLANG_ARM ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) \
+                  /* IAR */   || (defined ( __ICCARM__ ) && defined ( __ARMVFP__ ))        \
+                  /* GNU */   || (defined ( __GNUC__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) )
+#define USE_FPU   1
+#else
+#define USE_FPU   0
+#endif
+
+/* context-switch bookkeeping shared with the assembly port
+   (presumably read/written by the PendSV path in context_*.S - confirm) */
+rt_uint32_t rt_interrupt_from_thread;
+rt_uint32_t rt_interrupt_to_thread;
+rt_uint32_t rt_thread_switch_interrupt_flag;
+
+/* user-installable hook given first chance in rt_hw_hard_fault_exception() */
+static rt_err_t (*rt_exception_hook)(void *context) = RT_NULL;
+
+/* frame stacked automatically by hardware on exception entry */
+struct exception_stack_frame
+{
+    rt_uint32_t r0;
+    rt_uint32_t r1;
+    rt_uint32_t r2;
+    rt_uint32_t r3;
+    rt_uint32_t r12;
+    rt_uint32_t lr;
+    rt_uint32_t pc;
+    rt_uint32_t psr;
+};
+
+/* software-saved thread context; the layout must match the push order in
+   the assembly port (tz, lr, psplim, control, r4-r11, hardware frame) */
+struct stack_frame
+{
+    rt_uint32_t tz;       /* TrustZone context id (rt_trustzone_current_context) */
+    rt_uint32_t lr;       /* EXC_RETURN value */
+    rt_uint32_t psplim;   /* stack-limit register */
+    rt_uint32_t control;  /* CONTROL register */
+
+    /* r4 ~ r11 register */
+    rt_uint32_t r4;
+    rt_uint32_t r5;
+    rt_uint32_t r6;
+    rt_uint32_t r7;
+    rt_uint32_t r8;
+    rt_uint32_t r9;
+    rt_uint32_t r10;
+    rt_uint32_t r11;
+
+    struct exception_stack_frame exception_stack_frame;
+};
+
+/* hardware-stacked frame when an extended (FPU) frame is used:
+   the basic frame plus s0-s15, FPSCR and an alignment/reserved word */
+struct exception_stack_frame_fpu
+{
+    rt_uint32_t r0;
+    rt_uint32_t r1;
+    rt_uint32_t r2;
+    rt_uint32_t r3;
+    rt_uint32_t r12;
+    rt_uint32_t lr;
+    rt_uint32_t pc;
+    rt_uint32_t psr;
+
+#if USE_FPU
+    /* FPU register */
+    rt_uint32_t S0;
+    rt_uint32_t S1;
+    rt_uint32_t S2;
+    rt_uint32_t S3;
+    rt_uint32_t S4;
+    rt_uint32_t S5;
+    rt_uint32_t S6;
+    rt_uint32_t S7;
+    rt_uint32_t S8;
+    rt_uint32_t S9;
+    rt_uint32_t S10;
+    rt_uint32_t S11;
+    rt_uint32_t S12;
+    rt_uint32_t S13;
+    rt_uint32_t S14;
+    rt_uint32_t S15;
+    rt_uint32_t FPSCR;
+    rt_uint32_t NO_NAME;    /* reserved / aligner word */
+#endif
+};
+
+/* software-saved context with callee-saved FPU registers.
+   NOTE(review): this starts with a single 'flag' word instead of the
+   tz/lr/psplim/control header used by struct stack_frame above - it looks
+   carried over from the cortex-m4 port; confirm it matches the FPU
+   save/restore order actually used by this port's assembly. */
+struct stack_frame_fpu
+{
+    rt_uint32_t flag;
+
+    /* r4 ~ r11 register */
+    rt_uint32_t r4;
+    rt_uint32_t r5;
+    rt_uint32_t r6;
+    rt_uint32_t r7;
+    rt_uint32_t r8;
+    rt_uint32_t r9;
+    rt_uint32_t r10;
+    rt_uint32_t r11;
+
+#if USE_FPU
+    /* FPU register s16 ~ s31 */
+    rt_uint32_t s16;
+    rt_uint32_t s17;
+    rt_uint32_t s18;
+    rt_uint32_t s19;
+    rt_uint32_t s20;
+    rt_uint32_t s21;
+    rt_uint32_t s22;
+    rt_uint32_t s23;
+    rt_uint32_t s24;
+    rt_uint32_t s25;
+    rt_uint32_t s26;
+    rt_uint32_t s27;
+    rt_uint32_t s28;
+    rt_uint32_t s29;
+    rt_uint32_t s30;
+    rt_uint32_t s31;
+#endif
+
+    struct exception_stack_frame_fpu exception_stack_frame;
+};
+
+/**
+ * Initialize a thread's stack so the first context switch can "restore"
+ * it as if the thread had been suspended at its entry point.
+ *
+ * @param tentry     thread entry function (becomes the stacked PC)
+ * @param parameter  argument for the entry function (stacked R0)
+ * @param stack_addr top of the thread's stack memory
+ * @param texit      routine run if the entry function returns (stacked LR)
+ *
+ * @return the thread's initial stack pointer
+ */
+rt_uint8_t *rt_hw_stack_init(void       *tentry,
+                             void       *parameter,
+                             rt_uint8_t *stack_addr,
+                             void       *texit)
+{
+    struct stack_frame *stack_frame;
+    rt_uint8_t         *stk;
+    unsigned long       i;
+
+    /* align the stack top down to 8 bytes (AAPCS), then reserve one frame */
+    stk  = stack_addr + sizeof(rt_uint32_t);
+    stk  = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stk, 8);
+    stk -= sizeof(struct stack_frame);
+
+    stack_frame = (struct stack_frame *)stk;
+
+    /* init all register with a recognizable fill to aid stack debugging */
+    for (i = 0; i < sizeof(struct stack_frame) / sizeof(rt_uint32_t); i ++)
+    {
+        ((rt_uint32_t *)stack_frame)[i] = 0xdeadbeef;
+    }
+
+    stack_frame->exception_stack_frame.r0  = (unsigned long)parameter; /* r0 : argument */
+    stack_frame->exception_stack_frame.r1  = 0;                        /* r1 */
+    stack_frame->exception_stack_frame.r2  = 0;                        /* r2 */
+    stack_frame->exception_stack_frame.r3  = 0;                        /* r3 */
+    stack_frame->exception_stack_frame.r12 = 0;                        /* r12 */
+    stack_frame->exception_stack_frame.lr  = (unsigned long)texit;     /* lr */
+    stack_frame->exception_stack_frame.pc  = (unsigned long)tentry;    /* entry point, pc */
+    stack_frame->exception_stack_frame.psr = 0x01000000L;              /* PSR: Thumb state bit set */
+
+    stack_frame->tz = 0x00;            /* no TrustZone context allocated yet */
+    stack_frame->lr = 0xFFFFFFBC;      /* EXC_RETURN: thread mode, PSP, standard (non-FPU) frame */
+    stack_frame->psplim = 0x00;        /* no stack limit */
+    stack_frame->control = 0x00;
+
+    /* return task's current stack address */
+    return stk;
+}
+
+/**
+ * This function sets the hook, which is invoked first on fault exception
+ * handling. If the hook returns RT_EOK, the default register dump in
+ * rt_hw_hard_fault_exception() is skipped.
+ *
+ * @param exception_handle the exception handling hook function.
+ */
+void rt_hw_exception_install(rt_err_t (*exception_handle)(void *context))
+{
+    rt_exception_hook = exception_handle;
+}
+
+/* System Control Block fault-status registers (ARMv8-M memory map) */
+#define SCB_CFSR        (*(volatile const unsigned *)0xE000ED28) /* Configurable Fault Status Register */
+#define SCB_HFSR        (*(volatile const unsigned *)0xE000ED2C) /* HardFault Status Register */
+#define SCB_MMAR        (*(volatile const unsigned *)0xE000ED34) /* MemManage Fault Address register */
+#define SCB_BFAR        (*(volatile const unsigned *)0xE000ED38) /* Bus Fault Address Register */
+#define SCB_AIRCR       (*(volatile unsigned long *)0xE000ED0C)  /* Reset control Address Register */
+#define SCB_RESET_VALUE 0x05FA0004                               /* Reset value, write to SCB_AIRCR can reset cpu */
+
+/* byte/halfword views of CFSR for the individual fault-status fields */
+#define SCB_CFSR_MFSR   (*(volatile const unsigned char*)0xE000ED28)  /* Memory-management Fault Status Register */
+#define SCB_CFSR_BFSR   (*(volatile const unsigned char*)0xE000ED29)  /* Bus Fault Status Register */
+#define SCB_CFSR_UFSR   (*(volatile const unsigned short*)0xE000ED2A) /* Usage Fault Status Register */
+
+#ifdef RT_USING_FINSH
+/* decode and print the set bits of the Usage Fault Status Register */
+static void usage_fault_track(void)
+{
+    rt_kprintf("usage fault:\n");
+    rt_kprintf("SCB_CFSR_UFSR:0x%02X ", SCB_CFSR_UFSR);
+
+    if(SCB_CFSR_UFSR & (1<<0))
+    {
+        /* [0]:UNDEFINSTR */
+        rt_kprintf("UNDEFINSTR ");
+    }
+
+    if(SCB_CFSR_UFSR & (1<<1))
+    {
+        /* [1]:INVSTATE */
+        rt_kprintf("INVSTATE ");
+    }
+
+    if(SCB_CFSR_UFSR & (1<<2))
+    {
+        /* [2]:INVPC */
+        rt_kprintf("INVPC ");
+    }
+
+    if(SCB_CFSR_UFSR & (1<<3))
+    {
+        /* [3]:NOCP */
+        rt_kprintf("NOCP ");
+    }
+
+    if(SCB_CFSR_UFSR & (1<<8))
+    {
+        /* [8]:UNALIGNED */
+        rt_kprintf("UNALIGNED ");
+    }
+
+    if(SCB_CFSR_UFSR & (1<<9))
+    {
+        /* [9]:DIVBYZERO */
+        rt_kprintf("DIVBYZERO ");
+    }
+
+    rt_kprintf("\n");
+}
+
+/* decode and print the Bus Fault Status Register; prints the faulting
+   address (BFAR) when bit [7] (BFARVALID) is set */
+static void bus_fault_track(void)
+{
+    rt_kprintf("bus fault:\n");
+    rt_kprintf("SCB_CFSR_BFSR:0x%02X ", SCB_CFSR_BFSR);
+
+    if(SCB_CFSR_BFSR & (1<<0))
+    {
+        /* [0]:IBUSERR */
+        rt_kprintf("IBUSERR ");
+    }
+
+    if(SCB_CFSR_BFSR & (1<<1))
+    {
+        /* [1]:PRECISERR */
+        rt_kprintf("PRECISERR ");
+    }
+
+    if(SCB_CFSR_BFSR & (1<<2))
+    {
+        /* [2]:IMPRECISERR */
+        rt_kprintf("IMPRECISERR ");
+    }
+
+    if(SCB_CFSR_BFSR & (1<<3))
+    {
+        /* [3]:UNSTKERR */
+        rt_kprintf("UNSTKERR ");
+    }
+
+    if(SCB_CFSR_BFSR & (1<<4))
+    {
+        /* [4]:STKERR */
+        rt_kprintf("STKERR ");
+    }
+
+    if(SCB_CFSR_BFSR & (1<<7))
+    {
+        /* [7]:BFARVALID - the bus fault address register is valid */
+        rt_kprintf("SCB->BFAR:%08X\n", SCB_BFAR);
+    }
+    else
+    {
+        rt_kprintf("\n");
+    }
+}
+
+/* decode and print the MemManage Fault Status Register; prints the
+   faulting address (MMAR) when bit [7] (MMARVALID) is set */
+static void mem_manage_fault_track(void)
+{
+    rt_kprintf("mem manage fault:\n");
+    rt_kprintf("SCB_CFSR_MFSR:0x%02X ", SCB_CFSR_MFSR);
+
+    if(SCB_CFSR_MFSR & (1<<0))
+    {
+        /* [0]:IACCVIOL */
+        rt_kprintf("IACCVIOL ");
+    }
+
+    if(SCB_CFSR_MFSR & (1<<1))
+    {
+        /* [1]:DACCVIOL */
+        rt_kprintf("DACCVIOL ");
+    }
+
+    if(SCB_CFSR_MFSR & (1<<3))
+    {
+        /* [3]:MUNSTKERR */
+        rt_kprintf("MUNSTKERR ");
+    }
+
+    if(SCB_CFSR_MFSR & (1<<4))
+    {
+        /* [4]:MSTKERR */
+        rt_kprintf("MSTKERR ");
+    }
+
+    if(SCB_CFSR_MFSR & (1<<7))
+    {
+        /* [7]:MMARVALID */
+        rt_kprintf("SCB->MMAR:%08X\n", SCB_MMAR);
+    }
+    else
+    {
+        rt_kprintf("\n");
+    }
+}
+
+/* top-level hard-fault decoder: inspects HFSR and dispatches to the
+   bus/mem-manage/usage trackers when the fault was escalated (FORCED) */
+static void hard_fault_track(void)
+{
+    if(SCB_HFSR & (1UL<<1))
+    {
+        /* [1]:VECTBL, Indicates hard fault is caused by failed vector fetch. */
+        rt_kprintf("failed vector fetch\n");
+    }
+
+    if(SCB_HFSR & (1UL<<30))
+    {
+        /* [30]:FORCED, Indicates hard fault is taken because of bus fault,
+                        memory management fault, or usage fault. */
+        if(SCB_CFSR_BFSR)
+        {
+            bus_fault_track();
+        }
+
+        if(SCB_CFSR_MFSR)
+        {
+            mem_manage_fault_track();
+        }
+
+        if(SCB_CFSR_UFSR)
+        {
+            usage_fault_track();
+        }
+    }
+
+    if(SCB_HFSR & (1UL<<31))
+    {
+        /* [31]:DEBUGEVT, Indicates hard fault is triggered by debug event. */
+        rt_kprintf("debug event\n");
+    }
+}
+#endif /* RT_USING_FINSH */
+
+/* snapshot built on the faulting stack by the assembly HardFault handler:
+   EXC_RETURN followed by the saved thread context */
+struct exception_info
+{
+    rt_uint32_t exc_return;
+    struct stack_frame stack_frame;
+};
+
+/**
+ * Default hard-fault handler, called from assembly with a pointer to the
+ * context captured on the faulting stack.
+ *
+ * The installed rt_exception_hook gets first chance; returning RT_EOK
+ * from it suppresses the dump. Otherwise the full register set, the
+ * faulting thread and the decoded fault-status registers are printed,
+ * and the CPU spins forever.
+ *
+ * @param exception_info the context saved by HardFault_Handler
+ */
+void rt_hw_hard_fault_exception(struct exception_info *exception_info)
+{
+    extern long list_thread(void);
+    struct exception_stack_frame *exception_stack = &exception_info->stack_frame.exception_stack_frame;
+    struct stack_frame *context = &exception_info->stack_frame;
+
+    if (rt_exception_hook != RT_NULL)
+    {
+        rt_err_t result;
+
+        /* hook sees only the hardware-stacked frame, not the full context */
+        result = rt_exception_hook(exception_stack);
+        if (result == RT_EOK) return;
+    }
+
+    rt_kprintf("psr: 0x%08x\n", context->exception_stack_frame.psr);
+
+    rt_kprintf("r00: 0x%08x\n", context->exception_stack_frame.r0);
+    rt_kprintf("r01: 0x%08x\n", context->exception_stack_frame.r1);
+    rt_kprintf("r02: 0x%08x\n", context->exception_stack_frame.r2);
+    rt_kprintf("r03: 0x%08x\n", context->exception_stack_frame.r3);
+    rt_kprintf("r04: 0x%08x\n", context->r4);
+    rt_kprintf("r05: 0x%08x\n", context->r5);
+    rt_kprintf("r06: 0x%08x\n", context->r6);
+    rt_kprintf("r07: 0x%08x\n", context->r7);
+    rt_kprintf("r08: 0x%08x\n", context->r8);
+    rt_kprintf("r09: 0x%08x\n", context->r9);
+    rt_kprintf("r10: 0x%08x\n", context->r10);
+    rt_kprintf("r11: 0x%08x\n", context->r11);
+    rt_kprintf("r12: 0x%08x\n", context->exception_stack_frame.r12);
+    rt_kprintf(" lr: 0x%08x\n", context->exception_stack_frame.lr);
+    rt_kprintf(" pc: 0x%08x\n", context->exception_stack_frame.pc);
+
+    if (exception_info->exc_return & (1 << 2))
+    {
+        /* EXC_RETURN[2] set: fault taken from thread mode (PSP in use) */
+        rt_kprintf("hard fault on thread: %s\r\n\r\n", rt_thread_self()->name);
+
+#ifdef RT_USING_FINSH
+        list_thread();
+#endif
+    }
+    else
+    {
+        rt_kprintf("hard fault on handler\r\n\r\n");
+    }
+
+    if ( (exception_info->exc_return & 0x10) == 0)
+    {
+        /* EXC_RETURN[4] clear: an extended (FPU) frame was stacked */
+        rt_kprintf("FPU active!\r\n");
+    }
+
+#ifdef RT_USING_FINSH
+    hard_fault_track();
+#endif /* RT_USING_FINSH */
+
+    while (1);
+}
+
+/**
+ * shutdown CPU
+ *
+ * There is no real power-off on this port: print a notice and stop in
+ * an assertion.
+ */
+void rt_hw_cpu_shutdown(void)
+{
+    rt_kprintf("shutdown...\n");
+
+    RT_ASSERT(0);
+}
+
+/**
+ * reset CPU
+ *
+ * Requests a system reset by writing VECTKEY | SYSRESETREQ to SCB->AIRCR.
+ * Declared weak so a BSP can override it with a board-specific reset.
+ */
+RT_WEAK void rt_hw_cpu_reset(void)
+{
+    SCB_AIRCR = SCB_RESET_VALUE;
+}
+
+#ifdef RT_USING_CPU_FFS
+/**
+ * This function finds the first bit set (beginning with the least significant bit)
+ * in value and return the index of that bit.
+ *
+ * Bits are numbered starting at 1 (the least significant bit).  A return value of
+ * zero from any of these functions means that the argument was zero.
+ *
+ * All toolchain variants implement the same RBIT+CLZ+1 sequence except
+ * GCC, which maps directly to the __builtin_ffs() intrinsic.
+ *
+ * @return return the index of the first bit set. If value is 0, then this function
+ * shall return 0.
+ */
+#if defined(__CC_ARM) 
+__asm int __rt_ffs(int value)
+{
+    CMP     r0, #0x00
+    BEQ     exit
+
+    RBIT    r0, r0
+    CLZ     r0, r0
+    ADDS    r0, r0, #0x01
+
+exit
+    BX      lr
+}
+#elif defined(__CLANG_ARM)
+int __rt_ffs(int value)
+{
+    __asm volatile(
+        "CMP     r0, #0x00            \n"
+        "BEQ     exit                 \n"
+
+        "RBIT    r0, r0               \n"
+        "CLZ     r0, r0               \n"
+        "ADDS    r0, r0, #0x01        \n"
+
+        "exit:                        \n"
+        "BX      lr                   \n"
+
+        : "=r"(value)
+        : "r"(value)
+    );
+    return value;
+}
+#elif defined(__IAR_SYSTEMS_ICC__)
+int __rt_ffs(int value)
+{
+    if (value == 0) return value;
+
+    asm("RBIT %0, %1" : "=r"(value) : "r"(value));
+    asm("CLZ  %0, %1" : "=r"(value) : "r"(value));
+    asm("ADDS %0, %1, #0x01" : "=r"(value) : "r"(value));
+
+    return value;
+}
+#elif defined(__GNUC__)
+int __rt_ffs(int value)
+{
+    return __builtin_ffs(value);
+}
+#endif
+
+#endif

+ 59 - 0
libcpu/arm/cortex-m33/syscall_gcc.S

@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2006-2018, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2019-10-25     tyx          first version
+ */
+
+/* NOTE(review): cortex-m33 port but cortex-m4 is selected; none of the
+   instructions below need ARMv8-M features, so this assembles - confirm */
+.cpu cortex-m4
+.syntax unified
+.thumb
+.text
+
+/*
+ * int tzcall(int id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2);
+ *
+ * Thread-side entry: traps into SVC #1, which SVC_Handler routes to
+ * tzcall_entry below. The handler stores the result into the caller's
+ * stacked R0, so this returns rt_secure_svc_handle()'s result.
+ */
+.global tzcall
+.type tzcall, %function
+tzcall:
+    SVC     1                       /* raise SVC #1 (TrustZone call) */
+    BX      LR
+
+tzcall_entry:
+    PUSH    {R1, R4, LR}
+    MOV     R4, R1                  /* copy thread SP to R4 */
+    LDMFD   R4!, {r0 - r3}          /* load id, arg0, arg1, arg2 from the thread stack */
+    STMFD   R4!, {r0 - r3}          /* restore thread SP (stack contents unchanged) */
+    BL      rt_secure_svc_handle    /* rt_secure_svc_handle(id, arg0, arg1, arg2) */
+    POP     {R1, R4, LR}
+    STR     R0, [R1]                /* write result into stacked R0 = caller's return value */
+    BX      LR                      /* return to thread */
+
+syscall_entry:
+    BX      LR                      /* system call: nothing to do, return to user app */
+
+/*
+ * void SVC_Handler(void);
+ * Dispatches on the SVC immediate encoded in the trapping instruction:
+ *   0 -> syscall_entry, 1 -> tzcall_entry, others -> ignored.
+ */
+.global SVC_Handler
+.type SVC_Handler, %function
+SVC_Handler:
+
+    /* pick the stack that holds the caller's exception frame */
+    MRS     R1, MSP                 /* EXC_RETURN[2] == 0: frame on MSP */
+    TST     LR, #0x04               /* if(!EXC_RETURN[2]) */
+    BEQ     get_sp_done
+    MRS     R1, PSP                 /* EXC_RETURN[2] == 1: frame on PSP */
+get_sp_done:
+
+    /* read the SVC number from the SVC instruction, 2 bytes before stacked PC */
+    LDR     R0, [R1, #24]
+    LDRB    R0, [R0, #-2]
+
+    /* if svc == 0, do system call */
+    CMP     R0, #0x0
+    BEQ     syscall_entry
+
+    /* if svc == 1, do TrustZone call */
+    CMP     R0, #0x1
+    BEQ     tzcall_entry
+
+    /* unknown SVC number: previously fell off the end of the handler
+       (undefined execution); ignore it and return to the caller instead */
+    BX      LR

+ 63 - 0
libcpu/arm/cortex-m33/syscall_iar.S

@@ -0,0 +1,63 @@
+;/*
+; * Copyright (c) 2006-2018, RT-Thread Development Team
+; *
+; * SPDX-License-Identifier: Apache-2.0
+; *
+; * Change Logs:
+; * Date           Author       Notes
+; * 2019-10-25     tyx          first version
+; */
+
+;/*
+; * @addtogroup cortex-m33
+; */
+
+    SECTION    .text:CODE(2)
+    THUMB
+    REQUIRE8
+    PRESERVE8
+
+    ; fixed: the original used GNU gas directives (.global/.type), which
+    ; the IAR assembler rejects; use IMPORT/EXPORT and terminate with END
+    IMPORT rt_secure_svc_handle
+    EXPORT tzcall
+    EXPORT SVC_Handler
+
+;/*
+; * int tzcall(int id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2);
+; * Thread-side entry: traps into SVC #1; SVC_Handler routes it to
+; * tzcall_entry, which stores the result into the caller's stacked R0.
+; */
+tzcall:
+    SVC     1                       ;/* raise SVC #1 (TrustZone call) */
+    BX      LR
+
+tzcall_entry:
+    PUSH    {R1, R4, LR}
+    MOV     R4, R1                  ;/* copy thread SP to R4 */
+    LDMFD   R4!, {r0 - r3}          ;/* load id, arg0, arg1, arg2 from the thread stack */
+    STMFD   R4!, {r0 - r3}          ;/* restore thread SP (stack contents unchanged) */
+    BL      rt_secure_svc_handle    ;/* rt_secure_svc_handle(id, arg0, arg1, arg2) */
+    POP     {R1, R4, LR}
+    STR     R0, [R1]                ;/* write result into stacked R0 */
+    BX      LR                      ;/* return to thread */
+
+syscall_entry:
+    BX      LR                      ;/* system call: nothing to do, return to user app */
+
+;/*
+; * void SVC_Handler(void);
+; * Dispatch on the SVC immediate: 0 -> syscall_entry, 1 -> tzcall_entry.
+; */
+SVC_Handler:
+
+    ;/* pick the stack that holds the caller's exception frame */
+    MRS     R1, MSP                 ;/* EXC_RETURN[2] == 0: frame on MSP */
+    TST     LR, #0x04               ;/* if(!EXC_RETURN[2]) */
+    BEQ     get_sp_done
+    MRS     R1, PSP                 ;/* EXC_RETURN[2] == 1: frame on PSP */
+get_sp_done:
+
+    ;/* read the SVC number from the SVC instruction, 2 bytes before stacked PC */
+    LDR     R0, [R1, #24]
+    LDRB    R0, [R0, #-2]
+
+    ;/* if svc == 0, do system call */
+    CMP     R0, #0x0
+    BEQ     syscall_entry
+
+    ;/* if svc == 1, do TrustZone call */
+    CMP     R0, #0x1
+    BEQ     tzcall_entry
+
+    ;/* unknown SVC number: ignore and return (was: fall off end of file) */
+    BX      LR
+
+    END

+ 74 - 0
libcpu/arm/cortex-m33/syscall_rvds.S

@@ -0,0 +1,74 @@
+;/*
+; * Copyright (c) 2006-2018, RT-Thread Development Team
+; *
+; * SPDX-License-Identifier: Apache-2.0
+; *
+; * Change Logs:
+; * Date           Author       Notes
+; * 2019-10-25     tyx          first version
+; */
+
+    AREA |.text|, CODE, READONLY, ALIGN=2
+    THUMB
+    REQUIRE8
+    PRESERVE8
+
+    IMPORT rt_secure_svc_handle
+
+;/*
+; * int tzcall(int id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2);
+; * Thread-side entry: traps into SVC #1; SVC_Handler routes it to
+; * tzcall_entry, which stores the result into the caller's stacked R0.
+; */
+tzcall PROC
+    EXPORT tzcall
+    SVC     1                       ; raise SVC #1 (TrustZone call)
+    BX      LR
+
+    ENDP
+
+tzcall_entry     PROC
+    PUSH    {R1, R4, LR}
+    MOV     R4, R1                  ; copy thread SP to R4
+    LDMFD   R4!, {r0 - r3}          ; load id, arg0, arg1, arg2 from the thread stack
+    STMFD   R4!, {r0 - r3}          ; restore thread SP (stack contents unchanged)
+    BL      rt_secure_svc_handle    ; rt_secure_svc_handle(id, arg0, arg1, arg2)
+    POP     {R1, R4, LR}
+    STR     R0, [R1]                ; write result into stacked R0 = caller's return value
+    BX      LR                      ; return to thread
+
+    ENDP
+
+syscall_entry     PROC
+    BX      LR                      ; system call: nothing to do, return to user app
+
+    ENDP
+
+;/*
+; * void SVC_Handler(void);
+; * Dispatch on the SVC immediate: 0 -> syscall_entry, 1 -> tzcall_entry.
+; */
+SVC_Handler    PROC
+    EXPORT SVC_Handler
+
+    ; pick the stack that holds the caller's exception frame
+    MRS     R1, MSP                 ; EXC_RETURN[2] == 0: frame on MSP
+    TST     LR, #0x04               ; if(!EXC_RETURN[2])
+    BEQ     get_sp_done
+    MRS     R1, PSP                 ; EXC_RETURN[2] == 1: frame on PSP
+get_sp_done
+
+    ; read the SVC number from the SVC instruction, 2 bytes before stacked PC
+    LDR     R0, [R1, #24]
+    LDRB    R0, [R0, #-2]
+
+    ; if svc == 0, do system call
+    CMP     R0, #0x0
+    BEQ     syscall_entry
+
+    ; if svc == 1, do TrustZone call
+    CMP     R0, #0x1
+    BEQ     tzcall_entry
+
+    ; unknown SVC number: previously fell through past ENDP (undefined
+    ; execution); ignore it and return to the caller instead
+    BX      LR
+
+    ENDP
+
+    ALIGN
+
+    END

+ 123 - 0
libcpu/arm/cortex-m33/trustzone.c

@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2006-2019, RT-Thread Development Team
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Change Logs:
+ * Date           Author       Notes
+ * 2019-10-28     tyx          the first version.
+ */
+
+#include <rtthread.h>
+
+/* CMSIS TrustZone secure-context management API, implemented on the
+   secure side; invoked from rt_secure_svc_handle() (i.e. in handler mode
+   after a tzcall SVC) - confirm against the secure-side firmware */
+extern void TZ_InitContextSystem_S(void);
+extern rt_uint32_t TZ_AllocModuleContext_S (rt_uint32_t module);
+extern rt_uint32_t TZ_FreeModuleContext_S(rt_uint32_t id);
+extern rt_uint32_t TZ_LoadContext_S(rt_uint32_t id);
+extern rt_uint32_t TZ_StoreContext_S(rt_uint32_t id);
+extern int tzcall(int id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2);
+
+/* service ids dispatched by rt_secure_svc_handle() via SVC #1 */
+#define TZ_INIT_CONTEXT_ID     (0x1001)
+#define TZ_ALLOC_CONTEXT_ID    (0x1002)
+#define TZ_FREE_CONTEXT_ID     (0x1003)
+
+/* secure context id of the current module; 0 means none. Also read by
+   the assembly port (e.g. HardFault_Handler) to record it per thread. */
+rt_ubase_t rt_trustzone_current_context;
+
+/*
+ * Read the IPSR register (current exception number; 0 in thread mode).
+ * One variant per supported toolchain; all share the signature
+ * rt_uint32_t __get_IPSR(void). (The IAR variant previously declared
+ * 'int __get_IPSR(int value)' with an unused parameter, which was
+ * inconsistent with the other branches and broke zero-argument calls.)
+ * NOTE(review): not referenced elsewhere in this file - kept for use by
+ * BSP/secure glue; confirm.
+ */
+#if defined(__CC_ARM) 
+static __inline rt_uint32_t __get_IPSR(void)
+{
+  /* armcc: named-register variable reads IPSR directly */
+  register rt_uint32_t result          __asm("ipsr");
+  return(result);
+}
+#elif defined(__CLANG_ARM)
+__attribute__((always_inline)) static __inline rt_uint32_t __get_IPSR(void)
+{
+    rt_uint32_t result;
+
+    __asm volatile ("MRS %0, ipsr" : "=r" (result) );
+    return(result);
+}
+#elif defined(__IAR_SYSTEMS_ICC__)
+_Pragma("inline=forced") static inline rt_uint32_t __get_IPSR(void)
+{
+    rt_uint32_t result;
+
+    asm("MRS  %0, ipsr" : "=r"(result));
+    return result;
+}
+#elif defined(__GNUC__)
+__attribute__((always_inline)) static inline rt_uint32_t __get_IPSR(void)
+{
+  rt_uint32_t result;
+
+  __asm volatile ("MRS %0, ipsr" : "=r" (result) );
+  return(result);
+}
+#endif
+
+/* One-time initialization of the secure-side context system.
+   Idempotent: guarded by a local static flag, safe to call repeatedly. */
+void rt_trustzone_init(void)
+{
+    static rt_uint8_t _init;    /* zero-initialized: not yet initialized */
+
+    if (_init)
+        return;
+    tzcall(TZ_INIT_CONTEXT_ID, 0, 0, 0);
+    _init = 1;
+}
+
+/* Allocate and load a secure context for 'module' (initializing the
+   context system first if needed).
+   Returns RT_EOK on success, -RT_ERROR when the secure side could not
+   allocate a context. */
+rt_err_t rt_trustzone_enter(rt_ubase_t module)
+{
+    rt_trustzone_init();
+    if (tzcall(TZ_ALLOC_CONTEXT_ID, module, 0, 0))
+    {
+        return RT_EOK; 
+    }
+    return -RT_ERROR;
+}
+
+/* Free the current module's secure context; always returns RT_EOK. */
+rt_err_t rt_trustzone_exit(void)
+{
+    tzcall(TZ_FREE_CONTEXT_ID, 0, 0, 0);
+    return RT_EOK;
+}
+
+/* Save the secure-side state of 'context' (direct secure call;
+   presumably used by the context-switch path - confirm callers). */
+void rt_trustzone_context_store(rt_ubase_t context)
+{
+    TZ_StoreContext_S(context);
+}
+
+/* Restore the secure-side state of 'context' (direct secure call). */
+void rt_trustzone_context_load(rt_ubase_t context)
+{
+    TZ_LoadContext_S(context);
+}
+
+/*
+ * SVC #1 dispatcher, called from the assembly SVC handler with the four
+ * words popped from the caller's stack (see tzcall()).
+ *
+ * svc_id selects the TrustZone service; arg0 is the module id for
+ * TZ_ALLOC_CONTEXT_ID, arg1/arg2 are currently unused.
+ * Returns the allocated context id for TZ_ALLOC_CONTEXT_ID (0/negative
+ * on failure), 0 for everything else; the assembly stub places this in
+ * the caller's stacked R0.
+ */
+int rt_secure_svc_handle(int svc_id, rt_ubase_t arg0, rt_ubase_t arg1, rt_ubase_t arg2)
+{
+    int res = 0;    /* removed unused local 'tmp' */
+
+    switch (svc_id)
+    {
+    case TZ_INIT_CONTEXT_ID:
+        TZ_InitContextSystem_S();
+        break;
+    case TZ_ALLOC_CONTEXT_ID:
+        res = TZ_AllocModuleContext_S(arg0);
+        if (res <= 0)
+        {
+            rt_kprintf("Alloc Context Failed\n");
+        }
+        else
+        {
+            /* remember and activate the new secure context */
+            rt_trustzone_current_context = res;
+            TZ_LoadContext_S(res);
+        }
+        break;
+    case TZ_FREE_CONTEXT_ID:
+        TZ_FreeModuleContext_S(rt_trustzone_current_context);
+        rt_trustzone_current_context = 0;
+        break;
+    default:
+        /* unknown service id: ignore, return 0 */
+        break;
+    }
+    return res;
+}
+