فهرست منبع

[asm] 解决tab和空格混用的问题

Meco Man 3 سال پیش
والد
کامیت
563e49890c
80 فایل‌های تغییر یافته به همراه 3558 افزوده شده و 3558 حذف شده
  1. 2 2
      libcpu/aarch64/common/cache.S
  2. 146 146
      libcpu/aarch64/common/context_gcc.S
  3. 53 53
      libcpu/aarch64/common/cpu_gcc.S
  4. 2 2
      libcpu/aarch64/common/startup_gcc.S
  5. 5 5
      libcpu/aarch64/common/vector_gcc.S
  6. 220 220
      libcpu/arc/em/contex_gcc_mw.S
  7. 38 38
      libcpu/arm/AT91SAM7S/context_gcc.S
  8. 33 33
      libcpu/arm/AT91SAM7S/context_rvds.S
  9. 181 181
      libcpu/arm/AT91SAM7S/start_gcc.S
  10. 38 38
      libcpu/arm/AT91SAM7X/context_gcc.S
  11. 61 61
      libcpu/arm/AT91SAM7X/context_rvds.S
  12. 183 183
      libcpu/arm/AT91SAM7X/start_gcc.S
  13. 37 37
      libcpu/arm/am335x/cp15_gcc.S
  14. 41 41
      libcpu/arm/am335x/cp15_iar.s
  15. 3 3
      libcpu/arm/am335x/start_gcc.S
  16. 11 11
      libcpu/arm/am335x/start_iar.s
  17. 2 2
      libcpu/arm/am335x/vector_gcc.S
  18. 1 1
      libcpu/arm/arm926/context_iar.S
  19. 23 23
      libcpu/arm/arm926/start_gcc.S
  20. 79 79
      libcpu/arm/armv6/arm_entry_gcc.S
  21. 320 320
      libcpu/arm/common/divsi3.S
  22. 1 1
      libcpu/arm/cortex-a/start_gcc.S
  23. 10 10
      libcpu/arm/cortex-m0/context_gcc.S
  24. 3 3
      libcpu/arm/cortex-m0/context_rvds.S
  25. 10 10
      libcpu/arm/cortex-m23/context_gcc.S
  26. 3 3
      libcpu/arm/cortex-m23/context_rvds.S
  27. 3 3
      libcpu/arm/cortex-m3/context_gcc.S
  28. 1 1
      libcpu/arm/cortex-m3/context_rvds.S
  29. 1 1
      libcpu/arm/cortex-m33/context_iar.S
  30. 2 2
      libcpu/arm/cortex-m33/syscall_gcc.S
  31. 2 2
      libcpu/arm/cortex-m33/syscall_iar.S
  32. 4 4
      libcpu/arm/cortex-m33/syscall_rvds.S
  33. 3 3
      libcpu/arm/cortex-m4/context_gcc.S
  34. 2 2
      libcpu/arm/cortex-m7/context_gcc.S
  35. 14 14
      libcpu/arm/cortex-r4/context_ccs.asm
  36. 11 11
      libcpu/arm/cortex-r4/context_gcc.S
  37. 13 13
      libcpu/arm/cortex-r4/start_ccs.asm
  38. 9 9
      libcpu/arm/cortex-r4/start_gcc.S
  39. 61 61
      libcpu/arm/dm36x/context_rvds.S
  40. 64 64
      libcpu/arm/lpc214x/context_gcc.S
  41. 94 94
      libcpu/arm/lpc214x/context_rvds.S
  42. 171 171
      libcpu/arm/lpc214x/startup_gcc.S
  43. 38 38
      libcpu/arm/lpc24xx/context_gcc.S
  44. 63 63
      libcpu/arm/lpc24xx/context_rvds.S
  45. 17 17
      libcpu/arm/lpc24xx/start_gcc.S
  46. 60 60
      libcpu/arm/lpc24xx/start_rvds.S
  47. 10 10
      libcpu/arm/realview-a8-vmm/start_gcc.S
  48. 2 2
      libcpu/arm/realview-a8-vmm/vector_gcc.S
  49. 38 38
      libcpu/arm/s3c24x0/context_gcc.S
  50. 61 61
      libcpu/arm/s3c24x0/context_rvds.S
  51. 273 273
      libcpu/arm/s3c24x0/start_gcc.S
  52. 13 13
      libcpu/arm/s3c24x0/start_rvds.S
  53. 38 38
      libcpu/arm/s3c44b0/context_gcc.S
  54. 61 61
      libcpu/arm/s3c44b0/context_rvds.S
  55. 165 165
      libcpu/arm/s3c44b0/start_gcc.S
  56. 61 61
      libcpu/arm/sep4020/context_rvds.S
  57. 4 4
      libcpu/arm/zynqmp-r5/context_gcc.S
  58. 37 37
      libcpu/arm/zynqmp-r5/start_gcc.S
  59. 2 2
      libcpu/arm/zynqmp-r5/vector_gcc.S
  60. 36 36
      libcpu/avr32/uc3/context_gcc.S
  61. 43 43
      libcpu/ia32/context_gcc.S
  62. 61 61
      libcpu/ia32/hdisr_gcc.S
  63. 44 44
      libcpu/ia32/start_gcc.S
  64. 45 45
      libcpu/ia32/trapisr_gcc.S
  65. 9 9
      libcpu/m16c/m16c62p/context_gcc.S
  66. 5 5
      libcpu/m16c/m16c62p/context_iar.S
  67. 5 5
      libcpu/m16c/m16c62p/context_iar.asm
  68. 2 2
      libcpu/mips/common/context_gcc.S
  69. 16 16
      libcpu/mips/common/entry_gcc.S
  70. 17 17
      libcpu/mips/common/exception_gcc.S
  71. 86 86
      libcpu/mips/gs232/cache_gcc.S
  72. 3 3
      libcpu/mips/gs232/cpuinit_gcc.S
  73. 11 11
      libcpu/mips/pic32/context_gcc.S
  74. 2 2
      libcpu/risc-v/e310/interrupt_gcc.S
  75. 2 2
      libcpu/risc-v/k210/startup_gcc.S
  76. 2 2
      libcpu/risc-v/rv32m1/interrupt_gcc.S
  77. 3 3
      libcpu/risc-v/virt64/startup_gcc.S
  78. 59 59
      libcpu/ti-dsp/c28x/context.s
  79. 41 41
      libcpu/unicore32/sep6200/context_gcc.S
  80. 162 162
      libcpu/unicore32/sep6200/start_gcc.S

+ 2 - 2
libcpu/aarch64/common/cache.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -199,4 +199,4 @@ __asm_invalidate_icache_all:
 .globl __asm_flush_l3_cache
 __asm_flush_l3_cache:
     mov    x0, #0            /* return status as success */
-    ret
+    ret

+ 146 - 146
libcpu/aarch64/common/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -23,9 +23,9 @@
  */
 .globl rt_hw_gtimer_enable
 rt_hw_gtimer_enable:
-	MOV X0,#1
-	MSR CNTP_CTL_EL0,X0
-	RET
+    MOV X0,#1
+    MSR CNTP_CTL_EL0,X0
+    RET
 
 /*
  *disable gtimer
@@ -40,183 +40,183 @@ rt_hw_gtimer_disable:
  */
 .globl rt_hw_set_gtimer_val
 rt_hw_set_gtimer_val:
-	MSR CNTP_TVAL_EL0,X0
-	RET
+    MSR CNTP_TVAL_EL0,X0
+    RET
 
 /*
  *get gtimer CNTP_TVAL_EL0 value
  */
 .globl rt_hw_get_gtimer_val
 rt_hw_get_gtimer_val:
-	MRS X0,CNTP_TVAL_EL0
-	RET
+    MRS X0,CNTP_TVAL_EL0
+    RET
 
 
 .globl rt_hw_get_cntpct_val
 rt_hw_get_cntpct_val:
-	MRS X0, CNTPCT_EL0
-	RET
+    MRS X0, CNTPCT_EL0
+    RET
 
 /*
  *get gtimer frq value
  */
 .globl rt_hw_get_gtimer_frq
 rt_hw_get_gtimer_frq:
-	MRS X0,CNTFRQ_EL0
-	RET
+    MRS X0,CNTFRQ_EL0
+    RET
 
 .macro SAVE_CONTEXT
     /* Save the entire context. */
     SAVE_FPU SP
-    STP 	X0, X1, [SP, #-0x10]!
-    STP 	X2, X3, [SP, #-0x10]!
-    STP 	X4, X5, [SP, #-0x10]!
-    STP 	X6, X7, [SP, #-0x10]!
-    STP 	X8, X9, [SP, #-0x10]!
-    STP 	X10, X11, [SP, #-0x10]!
-    STP 	X12, X13, [SP, #-0x10]!
-    STP 	X14, X15, [SP, #-0x10]!
-    STP 	X16, X17, [SP, #-0x10]!
-    STP 	X18, X19, [SP, #-0x10]!
-    STP 	X20, X21, [SP, #-0x10]!
-    STP 	X22, X23, [SP, #-0x10]!
-    STP 	X24, X25, [SP, #-0x10]!
-    STP 	X26, X27, [SP, #-0x10]!
-    STP 	X28, X29, [SP, #-0x10]!
+    STP     X0, X1, [SP, #-0x10]!
+    STP     X2, X3, [SP, #-0x10]!
+    STP     X4, X5, [SP, #-0x10]!
+    STP     X6, X7, [SP, #-0x10]!
+    STP     X8, X9, [SP, #-0x10]!
+    STP     X10, X11, [SP, #-0x10]!
+    STP     X12, X13, [SP, #-0x10]!
+    STP     X14, X15, [SP, #-0x10]!
+    STP     X16, X17, [SP, #-0x10]!
+    STP     X18, X19, [SP, #-0x10]!
+    STP     X20, X21, [SP, #-0x10]!
+    STP     X22, X23, [SP, #-0x10]!
+    STP     X24, X25, [SP, #-0x10]!
+    STP     X26, X27, [SP, #-0x10]!
+    STP     X28, X29, [SP, #-0x10]!
     MRS     X28, FPCR
     MRS     X29, FPSR
     STP     X28, X29, [SP, #-0x10]!
-    STP 	X30, XZR, [SP, #-0x10]!
-
-    MRS		X0, CurrentEL
-    CMP		X0, 0xc
-    B.EQ	3f
-    CMP		X0, 0x8
-    B.EQ	2f
-    CMP		X0, 0x4
-    B.EQ	1f
-    B 		.
+    STP     X30, XZR, [SP, #-0x10]!
+
+    MRS     X0, CurrentEL
+    CMP     X0, 0xc
+    B.EQ    3f
+    CMP     X0, 0x8
+    B.EQ    2f
+    CMP     X0, 0x4
+    B.EQ    1f
+    B       .
 3:
-    MRS		X3, SPSR_EL3
+    MRS     X3, SPSR_EL3
     /* Save the ELR. */
-    MRS		X2, ELR_EL3
-    B		0f
+    MRS     X2, ELR_EL3
+    B       0f
 2:
-    MRS		X3, SPSR_EL2
+    MRS     X3, SPSR_EL2
     /* Save the ELR. */
-    MRS		X2, ELR_EL2
-    B		0f
+    MRS     X2, ELR_EL2
+    B       0f
 1:
-    MRS		X3, SPSR_EL1
-    MRS		X2, ELR_EL1
-    B		0f
+    MRS     X3, SPSR_EL1
+    MRS     X2, ELR_EL1
+    B       0f
 0:
 
-    STP 	X2, X3, [SP, #-0x10]!
+    STP     X2, X3, [SP, #-0x10]!
 
-    MOV 	X0, SP   /* Move SP into X0 for saving. */
+    MOV     X0, SP   /* Move SP into X0 for saving. */
 
     .endm
 
 .macro SAVE_CONTEXT_T
     /* Save the entire context. */
     SAVE_FPU SP
-    STP 	X0, X1, [SP, #-0x10]!
-    STP 	X2, X3, [SP, #-0x10]!
-    STP 	X4, X5, [SP, #-0x10]!
-    STP 	X6, X7, [SP, #-0x10]!
-    STP 	X8, X9, [SP, #-0x10]!
-    STP 	X10, X11, [SP, #-0x10]!
-    STP 	X12, X13, [SP, #-0x10]!
-    STP 	X14, X15, [SP, #-0x10]!
-    STP 	X16, X17, [SP, #-0x10]!
-    STP 	X18, X19, [SP, #-0x10]!
-    STP 	X20, X21, [SP, #-0x10]!
-    STP 	X22, X23, [SP, #-0x10]!
-    STP 	X24, X25, [SP, #-0x10]!
-    STP 	X26, X27, [SP, #-0x10]!
-    STP 	X28, X29, [SP, #-0x10]!
+    STP     X0, X1, [SP, #-0x10]!
+    STP     X2, X3, [SP, #-0x10]!
+    STP     X4, X5, [SP, #-0x10]!
+    STP     X6, X7, [SP, #-0x10]!
+    STP     X8, X9, [SP, #-0x10]!
+    STP     X10, X11, [SP, #-0x10]!
+    STP     X12, X13, [SP, #-0x10]!
+    STP     X14, X15, [SP, #-0x10]!
+    STP     X16, X17, [SP, #-0x10]!
+    STP     X18, X19, [SP, #-0x10]!
+    STP     X20, X21, [SP, #-0x10]!
+    STP     X22, X23, [SP, #-0x10]!
+    STP     X24, X25, [SP, #-0x10]!
+    STP     X26, X27, [SP, #-0x10]!
+    STP     X28, X29, [SP, #-0x10]!
     MRS     X28, FPCR
     MRS     X29, FPSR
     STP     X28, X29, [SP, #-0x10]!
-    STP 	X30, XZR, [SP, #-0x10]!
-
-    MRS		X0, CurrentEL
-    CMP		X0, 0xc
-    B.EQ	3f
-    CMP		X0, 0x8
-    B.EQ	2f
-    CMP		X0, 0x4
-    B.EQ	1f
-    B 		.
+    STP     X30, XZR, [SP, #-0x10]!
+
+    MRS     X0, CurrentEL
+    CMP     X0, 0xc
+    B.EQ    3f
+    CMP     X0, 0x8
+    B.EQ    2f
+    CMP     X0, 0x4
+    B.EQ    1f
+    B       .
 3:
-    MOV		X3, 0x0d
-    MOV		X2, X30
-    B		0f
+    MOV     X3, 0x0d
+    MOV     X2, X30
+    B       0f
 2:
-    MOV		X3, 0x09
-    MOV		X2, X30
-    B		0f
+    MOV     X3, 0x09
+    MOV     X2, X30
+    B       0f
 1:
-    MOV		X3, 0x05
-    MOV		X2, X30
-    B		0f
+    MOV     X3, 0x05
+    MOV     X2, X30
+    B       0f
 0:
 
-    STP 	X2, X3, [SP, #-0x10]!
+    STP     X2, X3, [SP, #-0x10]!
 
-    MOV 	X0, SP   /* Move SP into X0 for saving. */
+    MOV     X0, SP   /* Move SP into X0 for saving. */
 
     .endm
 
 .macro RESTORE_CONTEXT
 
     /* Set the SP to point to the stack of the task being restored. */
-    MOV		SP, X0
-
-    LDP 	X2, X3, [SP], #0x10  /* SPSR and ELR. */
-
-    MRS		X0, CurrentEL
-    CMP		X0, 0xc
-    B.EQ	3f
-    CMP		X0, 0x8
-    B.EQ	2f
-    CMP		X0, 0x4
-    B.EQ	1f
-    B 		.
+    MOV     SP, X0
+
+    LDP     X2, X3, [SP], #0x10  /* SPSR and ELR. */
+
+    MRS     X0, CurrentEL
+    CMP     X0, 0xc
+    B.EQ    3f
+    CMP     X0, 0x8
+    B.EQ    2f
+    CMP     X0, 0x4
+    B.EQ    1f
+    B       .
 3:
-    MSR		SPSR_EL3, X3
-    MSR		ELR_EL3, X2
-    B		0f
+    MSR     SPSR_EL3, X3
+    MSR     ELR_EL3, X2
+    B       0f
 2:
-    MSR		SPSR_EL2, X3
-    MSR		ELR_EL2, X2
-    B		0f
+    MSR     SPSR_EL2, X3
+    MSR     ELR_EL2, X2
+    B       0f
 1:
-    MSR		SPSR_EL1, X3
-    MSR		ELR_EL1, X2
-    B		0f
+    MSR     SPSR_EL1, X3
+    MSR     ELR_EL1, X2
+    B       0f
 0:
 
-    LDP 	X30, XZR, [SP], #0x10
+    LDP     X30, XZR, [SP], #0x10
     LDP     X28, X29, [SP], #0x10
     MSR     FPCR, X28
     MSR     FPSR, X29
-    LDP 	X28, X29, [SP], #0x10
-    LDP 	X26, X27, [SP], #0x10
-    LDP 	X24, X25, [SP], #0x10
-    LDP 	X22, X23, [SP], #0x10
-    LDP 	X20, X21, [SP], #0x10
-    LDP 	X18, X19, [SP], #0x10
-    LDP 	X16, X17, [SP], #0x10
-    LDP 	X14, X15, [SP], #0x10
-    LDP 	X12, X13, [SP], #0x10
-    LDP 	X10, X11, [SP], #0x10
-    LDP 	X8, X9, [SP], #0x10
-    LDP 	X6, X7, [SP], #0x10
-    LDP 	X4, X5, [SP], #0x10
-    LDP 	X2, X3, [SP], #0x10
-    LDP 	X0, X1, [SP], #0x10
+    LDP     X28, X29, [SP], #0x10
+    LDP     X26, X27, [SP], #0x10
+    LDP     X24, X25, [SP], #0x10
+    LDP     X22, X23, [SP], #0x10
+    LDP     X20, X21, [SP], #0x10
+    LDP     X18, X19, [SP], #0x10
+    LDP     X16, X17, [SP], #0x10
+    LDP     X14, X15, [SP], #0x10
+    LDP     X12, X13, [SP], #0x10
+    LDP     X10, X11, [SP], #0x10
+    LDP     X8, X9, [SP], #0x10
+    LDP     X6, X7, [SP], #0x10
+    LDP     X4, X5, [SP], #0x10
+    LDP     X2, X3, [SP], #0x10
+    LDP     X0, X1, [SP], #0x10
     RESTORE_FPU SP
 
     ERET
@@ -264,7 +264,7 @@ rt_hw_context_switch_to:
     BL      rt_cpus_lock_status_restore
     LDR     X0, [SP], #0x8
 #endif /*RT_USING_SMP*/
-    LDR		X0, [X0]
+    LDR     X0, [X0]
     RESTORE_CONTEXT
 
 .text
@@ -289,14 +289,14 @@ rt_hw_context_switch:
     LDP     X0, X1, [SP], #0x10
 #endif /*RT_USING_SMP*/
 
-    MOV		X8,X0
-    MOV		X9,X1
+    MOV     X8,X0
+    MOV     X9,X1
 
     SAVE_CONTEXT_T
-    
-    STR		X0, [X8]            // store sp in preempted tasks TCB
-    LDR		X0, [X9]            // get new task stack pointer
-    
+
+    STR     X0, [X8]            // store sp in preempted tasks TCB
+    LDR     X0, [X9]            // get new task stack pointer
+
     RESTORE_CONTEXT
 
 /*
@@ -320,17 +320,17 @@ rt_hw_context_switch_interrupt:
     MOV     X0, SP
     RESTORE_CONTEXT
 #else
-    LDR 	X2, =rt_thread_switch_interrupt_flag
-    LDR 	X3, [X2]
-    CMP 	X3, #1
-    B.EQ 	_reswitch
-    LDR 	X4, =rt_interrupt_from_thread  // set rt_interrupt_from_thread
-    MOV 	X3, #1              // set rt_thread_switch_interrupt_flag to 1
-    STR 	X0, [X4]
-    STR 	X3, [X2]
+    LDR     X2, =rt_thread_switch_interrupt_flag
+    LDR     X3, [X2]
+    CMP     X3, #1
+    B.EQ    _reswitch
+    LDR     X4, =rt_interrupt_from_thread  // set rt_interrupt_from_thread
+    MOV     X3, #1              // set rt_thread_switch_interrupt_flag to 1
+    STR     X0, [X4]
+    STR     X3, [X2]
 _reswitch:
-    LDR 	X2, =rt_interrupt_to_thread    // set rt_interrupt_to_thread
-    STR 	X1, [X2]
+    LDR     X2, =rt_interrupt_to_thread    // set rt_interrupt_to_thread
+    STR     X1, [X2]
     RET
 #endif
 .text
@@ -341,9 +341,9 @@ _reswitch:
 .globl vector_fiq
 vector_fiq:
     SAVE_CONTEXT
-    STP 	X0, X1, [SP, #-0x10]!
+    STP     X0, X1, [SP, #-0x10]!
     BL      rt_hw_trap_fiq
-    LDP 	X0, X1, [SP], #0x10
+    LDP     X0, X1, [SP], #0x10
     RESTORE_CONTEXT
 
 .globl      rt_interrupt_enter
@@ -359,13 +359,13 @@ vector_fiq:
 .globl vector_irq
 vector_irq:
     SAVE_CONTEXT
-    STP 	X0, X1, [SP, #-0x10]!
+    STP     X0, X1, [SP, #-0x10]!
 
     BL      rt_interrupt_enter
     BL      rt_hw_trap_irq
     BL      rt_interrupt_leave
-    
-    LDP 	X0, X1, [SP], #0x10
+
+    LDP     X0, X1, [SP], #0x10
 #ifdef RT_USING_SMP
     /* Never reture If can switch */
     BL      rt_scheduler_do_irq_switch
@@ -374,7 +374,7 @@ vector_irq:
 
     // if rt_thread_switch_interrupt_flag set, jump to
     // rt_hw_context_switch_interrupt_do and don't return
-    LDR 	X1, =rt_thread_switch_interrupt_flag
+    LDR     X1, =rt_thread_switch_interrupt_flag
     LDR     X2, [X1]
     CMP     X2, #1
     B.NE     vector_irq_exit
@@ -389,8 +389,8 @@ vector_irq:
     LDR     x3, =rt_interrupt_to_thread
     LDR     X4,  [X3]
     LDR     x0,  [X4]       // get new task's stack pointer
-    
-vector_irq_exit:	
+
+vector_irq_exit:
     RESTORE_CONTEXT
 
 // -------------------------------------------------

+ 53 - 53
libcpu/aarch64/common/cpu_gcc.S

@@ -1,82 +1,82 @@
 /*
- * Copyright (c) 2006-2020, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
  * Date           Author       Notes
  * 2018-10-06     ZhaoXiaowei    the first version
  */
- 
+
 .text
 .globl rt_hw_get_current_el
 rt_hw_get_current_el:
-	MRS		X0, CurrentEL
-	CMP		X0, 0xc
-	B.EQ	3f
-	CMP		X0, 0x8
-	B.EQ	2f
-	CMP		X0, 0x4
-	B.EQ	1f
-	
-	LDR		X0, =0
-	B		0f
+    MRS     X0, CurrentEL
+    CMP     X0, 0xc
+    B.EQ    3f
+    CMP     X0, 0x8
+    B.EQ    2f
+    CMP     X0, 0x4
+    B.EQ    1f
+
+    LDR     X0, =0
+    B       0f
 3:
-	LDR		X0, =3
-	B		0f
+    LDR     X0, =3
+    B       0f
 2:
-	LDR		X0, =2
-	B		0f
+    LDR     X0, =2
+    B       0f
 1:
-	LDR		X0, =1
-	B		0f
+    LDR     X0, =1
+    B       0f
 0:
-	RET
+    RET
 
 
 .globl rt_hw_set_current_vbar
 rt_hw_set_current_vbar:
-	MRS		X1, CurrentEL
-	CMP		X1, 0xc
-	B.EQ	3f
-	CMP		X1, 0x8
-	B.EQ	2f
-	CMP		X1, 0x4
-	B.EQ	1f
-	B		0f
+    MRS     X1, CurrentEL
+    CMP     X1, 0xc
+    B.EQ    3f
+    CMP     X1, 0x8
+    B.EQ    2f
+    CMP     X1, 0x4
+    B.EQ    1f
+    B       0f
 3:
-	MSR		VBAR_EL3,X0
-	B		0f
+    MSR     VBAR_EL3,X0
+    B       0f
 2:
-	MSR		VBAR_EL2,X0
-	B		0f
+    MSR     VBAR_EL2,X0
+    B       0f
 1:
-	MSR		VBAR_EL1,X0
-	B		0f
+    MSR     VBAR_EL1,X0
+    B       0f
 0:
-	RET
+    RET
+
 
-    
 .globl rt_hw_set_elx_env
 rt_hw_set_elx_env:
-	MRS		X1, CurrentEL
-	CMP		X1, 0xc
-	B.EQ	3f
-	CMP		X1, 0x8
-	B.EQ	2f
-	CMP		X1, 0x4
-	B.EQ	1f
-	B		0f
+    MRS     X1, CurrentEL
+    CMP     X1, 0xc
+    B.EQ    3f
+    CMP     X1, 0x8
+    B.EQ    2f
+    CMP     X1, 0x4
+    B.EQ    1f
+    B       0f
 3:
-	MRS		X0, SCR_EL3
-	ORR		X0, X0, #0xF			/* SCR_EL3.NS|IRQ|FIQ|EA */
-	MSR		SCR_EL3, X0
-	B		0f
+    MRS     X0, SCR_EL3
+    ORR     X0, X0, #0xF            /* SCR_EL3.NS|IRQ|FIQ|EA */
+    MSR     SCR_EL3, X0
+    B       0f
 2:
-	MRS	X0, HCR_EL2
-	ORR	X0, X0, #0x38
-	MSR	HCR_EL2, X0
-	B		0f
+    MRS X0, HCR_EL2
+    ORR X0, X0, #0x38
+    MSR HCR_EL2, X0
+    B       0f
 1:
-	B		0f
+    B       0f
 0:
-	RET
+    RET

+ 2 - 2
libcpu/aarch64/common/startup_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -7,7 +7,7 @@
  * Date           Author       Notes
  */
 
-    .global	Reset_Handler
+    .global Reset_Handler
     .section ".start", "ax"
 Reset_Handler:
     nop

+ 5 - 5
libcpu/aarch64/common/vector_gcc.S

@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2006-2020, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
  * Date           Author       Notes
  * 2018-10-06     ZhaoXiaowei    the first version
  */
- 
+
 .text
 
 .globl system_vectors
@@ -20,7 +20,7 @@ system_vectors:
     .org    VBAR
     // Exception from CurrentEL (EL1) with SP_EL0 (SPSEL=0)
     .org (VBAR + 0x00 + 0)
-    B vector_error      // 			Synchronous
+    B vector_error      //          Synchronous
     .org (VBAR + 0x80 + 0)
     B vector_irq        //          IRQ/vIRQ
     .org (VBAR + 0x100 + 0)
@@ -30,9 +30,9 @@ system_vectors:
 
     // Exception from CurrentEL (EL1) with SP_ELn
     .org (VBAR + 0x200 + 0)
-    B vector_error      // 			Synchronous
+    B vector_error      //          Synchronous
     .org (VBAR + 0x280 + 0)
-    B vector_irq    	// 			IRQ/vIRQ
+    B vector_irq        //          IRQ/vIRQ
     .org (VBAR + 0x300 + 0)
     B vector_fiq        //          FIQ/vFIQ
     .org (VBAR + 0x380 + 0)

+ 220 - 220
libcpu/arc/em/contex_gcc_mw.S

@@ -15,68 +15,68 @@
 .global exc_nest_count;
 .global set_hw_stack_check;
 
-	.text
-	.align 4
+    .text
+    .align 4
 dispatcher:
-	st sp, [r0]
-	ld sp, [r1]
+    st sp, [r0]
+    ld sp, [r1]
 #if ARC_FEATURE_STACK_CHECK
 #if ARC_FEATURE_SEC_PRESENT
-	lr r0, [AUX_SEC_STAT]
-	bclr r0, r0, AUX_SEC_STAT_BIT_SSC
-	sflag r0
+    lr r0, [AUX_SEC_STAT]
+    bclr r0, r0, AUX_SEC_STAT_BIT_SSC
+    sflag r0
 #else
-	lr r0, [AUX_STATUS32]
-	bclr r0, r0, AUX_STATUS_BIT_SC
-	kflag r0
+    lr r0, [AUX_STATUS32]
+    bclr r0, r0, AUX_STATUS_BIT_SC
+    kflag r0
 #endif
-	jl	set_hw_stack_check
+    jl  set_hw_stack_check
 #if ARC_FEATURE_SEC_PRESENT
-	lr r0, [AUX_SEC_STAT]
-	bset r0, r0, AUX_SEC_STAT_BIT_SSC
-	sflag r0
+    lr r0, [AUX_SEC_STAT]
+    bset r0, r0, AUX_SEC_STAT_BIT_SSC
+    sflag r0
 #else
-	lr r0, [AUX_STATUS32]
-	bset r0, r0, AUX_STATUS_BIT_SC
-	kflag r0
+    lr r0, [AUX_STATUS32]
+    bset r0, r0, AUX_STATUS_BIT_SC
+    kflag r0
 #endif
 #endif
-	pop r0
-	j [r0]
+    pop r0
+    j [r0]
 
 /* return routine when task dispatch happened in task context */
 dispatch_r:
-	RESTORE_NONSCRATCH_REGS
-	j	[blink]
+    RESTORE_NONSCRATCH_REGS
+    j   [blink]
 
 /*
  * rt_base_t rt_hw_interrupt_disable();
  */
-	.global rt_hw_interrupt_disable
-	.align 4
+    .global rt_hw_interrupt_disable
+    .align 4
 rt_hw_interrupt_disable:
- 	clri r0
- 	j [blink]
+    clri r0
+    j [blink]
 
 
 /*
  * void rt_hw_interrupt_enable(rt_base_t level);
  */
-	.global rt_hw_interrupt_enable
-	.align 4
+    .global rt_hw_interrupt_enable
+    .align 4
 rt_hw_interrupt_enable:
-	seti r0
-	j [blink]
+    seti r0
+    j [blink]
 
 
-	.global rt_hw_context_switch_interrupt
-	.align 4
+    .global rt_hw_context_switch_interrupt
+    .align 4
 rt_hw_context_switch_interrupt:
-	st r0, [rt_interrupt_from_thread]
-	st r1, [rt_interrupt_to_thread]
-	mov r0, 1
-	st r0, [context_switch_reqflg]
-	j [blink]
+    st r0, [rt_interrupt_from_thread]
+    st r1, [rt_interrupt_to_thread]
+    mov r0, 1
+    st r0, [context_switch_reqflg]
+    j [blink]
 
 
 /*
@@ -84,281 +84,281 @@ rt_hw_context_switch_interrupt:
  * r0 --> from
  * r1 --> to
  */
-	.global rt_hw_context_switch
-	.align 4
+    .global rt_hw_context_switch
+    .align 4
 rt_hw_context_switch:
-	SAVE_NONSCRATCH_REGS
-	mov r2, dispatch_r
-	push r2
-	b dispatcher
+    SAVE_NONSCRATCH_REGS
+    mov r2, dispatch_r
+    push r2
+    b dispatcher
 
 
 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
  * r0 --> to
  */
-	.global rt_hw_context_switch_to
-	.align 4
+    .global rt_hw_context_switch_to
+    .align 4
 rt_hw_context_switch_to:
-	ld sp, [r0]
+    ld sp, [r0]
 #if ARC_FEATURE_STACK_CHECK
-	mov r1, r0
+    mov r1, r0
 #if ARC_FEATURE_SEC_PRESENT
-	lr r0, [AUX_SEC_STAT]
-	bclr r0, r0, AUX_SEC_STAT_BIT_SSC
-	sflag r0
+    lr r0, [AUX_SEC_STAT]
+    bclr r0, r0, AUX_SEC_STAT_BIT_SSC
+    sflag r0
 #else
-	lr r0, [AUX_STATUS32]
-	bclr r0, r0, AUX_STATUS_BIT_SC
-	kflag r0
+    lr r0, [AUX_STATUS32]
+    bclr r0, r0, AUX_STATUS_BIT_SC
+    kflag r0
 #endif
-	jl	set_hw_stack_check
+    jl  set_hw_stack_check
 #if ARC_FEATURE_SEC_PRESENT
-	lr r0, [AUX_SEC_STAT]
-	bset r0, r0, AUX_SEC_STAT_BIT_SSC
-	sflag r0
+    lr r0, [AUX_SEC_STAT]
+    bset r0, r0, AUX_SEC_STAT_BIT_SSC
+    sflag r0
 #else
-	lr r0, [AUX_STATUS32]
-	bset r0, r0, AUX_STATUS_BIT_SC
-	kflag r0
+    lr r0, [AUX_STATUS32]
+    bset r0, r0, AUX_STATUS_BIT_SC
+    kflag r0
 #endif
 #endif
-	pop r0
-	j [r0]
+    pop r0
+    j [r0]
 
-	.global start_r
-	.align 4
+    .global start_r
+    .align 4
 start_r:
-	pop blink;
-	pop r1
-	pop r2
-	pop r0
+    pop blink;
+    pop r1
+    pop r2
+    pop r0
 
-	j_s.d [r1]
-	kflag r2
+    j_s.d [r1]
+    kflag r2
 
 /*
  * int __rt_ffs(int value);
  * r0 --> value
  */
-	.global __rt_ffs
-	.align 4
+    .global __rt_ffs
+    .align 4
 __rt_ffs:
-	breq r0, 0, __rt_ffs_return
-	ffs r1, r0
-	add r0, r1, 1
+    breq r0, 0, __rt_ffs_return
+    ffs r1, r0
+    add r0, r1, 1
 __rt_ffs_return:
-	j [blink]
+    j [blink]
 
 /****** exceptions and interrupts handing ******/
 /****** entry for exception handling ******/
-	.global exc_entry_cpu
-	.align 4
+    .global exc_entry_cpu
+    .align 4
 exc_entry_cpu:
 
-	EXCEPTION_PROLOGUE
+    EXCEPTION_PROLOGUE
 
-	mov	blink,	sp
-	mov	r3, sp		/* as exception handler's para(p_excinfo) */
+    mov blink,  sp
+    mov r3, sp      /* as exception handler's para(p_excinfo) */
 
-	ld	r0, [exc_nest_count]
-	add	r1, r0, 1
-	st	r1, [exc_nest_count]
-	brne	r0, 0, exc_handler_1
+    ld  r0, [exc_nest_count]
+    add r1, r0, 1
+    st  r1, [exc_nest_count]
+    brne    r0, 0, exc_handler_1
 /* change to exception stack if interrupt happened in task context */
-	mov	sp, _e_stack
+    mov sp, _e_stack
 exc_handler_1:
-	PUSH	blink
+    PUSH    blink
 
-	lr	r0, [AUX_ECR]
-	lsr	r0, r0, 16
-	mov	r1, exc_int_handler_table
-	ld.as	r2, [r1, r0]
+    lr  r0, [AUX_ECR]
+    lsr r0, r0, 16
+    mov r1, exc_int_handler_table
+    ld.as   r2, [r1, r0]
 
-	mov	r0, r3
-	jl	[r2]
+    mov r0, r3
+    jl  [r2]
 
 /* interrupts are not allowed */
 ret_exc:
-	POP	sp
-	mov	r1, exc_nest_count
-	ld	r0, [r1]
-	sub	r0, r0, 1
-	st	r0, [r1]
-	brne	r0, 0, ret_exc_1 /* nest exception case */
-	lr	r1, [AUX_IRQ_ACT] /* nest interrupt case */
-	brne	r1, 0, ret_exc_1
-
-	ld	r0, [context_switch_reqflg]
-	brne	r0, 0, ret_exc_2
-ret_exc_1:	/* return from non-task context, interrupts or exceptions are nested */
-	EXCEPTION_EPILOGUE
-	rtie
+    POP sp
+    mov r1, exc_nest_count
+    ld  r0, [r1]
+    sub r0, r0, 1
+    st  r0, [r1]
+    brne    r0, 0, ret_exc_1 /* nest exception case */
+    lr  r1, [AUX_IRQ_ACT] /* nest interrupt case */
+    brne    r1, 0, ret_exc_1
+
+    ld  r0, [context_switch_reqflg]
+    brne    r0, 0, ret_exc_2
+ret_exc_1:  /* return from non-task context, interrupts or exceptions are nested */
+    EXCEPTION_EPILOGUE
+    rtie
 
 /* there is a dispatch request */
 ret_exc_2:
-	/* clear dispatch request */
-	mov	r0, 0
-	st	r0, [context_switch_reqflg]
+    /* clear dispatch request */
+    mov r0, 0
+    st  r0, [context_switch_reqflg]
 
-	SAVE_CALLEE_REGS	/* save callee save registers */
+    SAVE_CALLEE_REGS    /* save callee save registers */
 
-	/* clear exception bit to do exception exit by SW */
-	lr	r0, [AUX_STATUS32]
-	bclr	r0, r0, AUX_STATUS_BIT_AE
-	kflag	r0
+    /* clear exception bit to do exception exit by SW */
+    lr  r0, [AUX_STATUS32]
+    bclr    r0, r0, AUX_STATUS_BIT_AE
+    kflag   r0
 
-	mov	r1, ret_exc_r	/* save return address */
-	PUSH	r1
+    mov r1, ret_exc_r   /* save return address */
+    PUSH    r1
 
-	ld 	r0, [rt_interrupt_from_thread]
-	ld 	r1, [rt_interrupt_to_thread]
-	b	dispatcher
+    ld  r0, [rt_interrupt_from_thread]
+    ld  r1, [rt_interrupt_to_thread]
+    b   dispatcher
 
 ret_exc_r:
-	/* recover exception status */
-	lr	r0, [AUX_STATUS32]
-	bset	r0, r0, AUX_STATUS_BIT_AE
-	kflag	r0
+    /* recover exception status */
+    lr  r0, [AUX_STATUS32]
+    bset    r0, r0, AUX_STATUS_BIT_AE
+    kflag   r0
 
-	RESTORE_CALLEE_REGS
-	EXCEPTION_EPILOGUE
-	rtie
+    RESTORE_CALLEE_REGS
+    EXCEPTION_EPILOGUE
+    rtie
 
 /****** entry for normal interrupt exception handling ******/
-	.global exc_entry_int	/* entry for interrupt handling */
-	.align 4
+    .global exc_entry_int   /* entry for interrupt handling */
+    .align 4
 exc_entry_int:
 #if ARC_FEATURE_FIRQ == 1
 /*  check whether it is P0 interrupt */
 #if ARC_FEATURE_RGF_NUM_BANKS > 1
-	lr	r0, [AUX_IRQ_ACT]
-	btst	r0, 0
-	jnz	exc_entry_firq
+    lr  r0, [AUX_IRQ_ACT]
+    btst    r0, 0
+    jnz exc_entry_firq
 #else
-	PUSH	r10
-	lr	r10, [AUX_IRQ_ACT]
-	btst	r10, 0
-	POP	r10
-	jnz	exc_entry_firq
+    PUSH    r10
+    lr  r10, [AUX_IRQ_ACT]
+    btst    r10, 0
+    POP r10
+    jnz exc_entry_firq
 #endif
 #endif
-	INTERRUPT_PROLOGUE
+    INTERRUPT_PROLOGUE
 
-	mov	blink, sp
+    mov blink, sp
 
-	clri	/* disable interrupt */
-	ld	r3, [exc_nest_count]
-	add	r2, r3, 1
-	st	r2, [exc_nest_count]
-	seti	/* enable higher priority interrupt */
+    clri    /* disable interrupt */
+    ld  r3, [exc_nest_count]
+    add r2, r3, 1
+    st  r2, [exc_nest_count]
+    seti    /* enable higher priority interrupt */
 
-	brne	r3, 0, irq_handler_1
+    brne    r3, 0, irq_handler_1
 /* change to exception stack if interrupt happened in task context */
-	mov	sp, _e_stack
+    mov sp, _e_stack
 #if ARC_FEATURE_STACK_CHECK
 #if ARC_FEATURE_SEC_PRESENT
-	lr r0, [AUX_SEC_STAT]
-	bclr r0, r0, AUX_SEC_STAT_BIT_SSC
-	sflag r0
+    lr r0, [AUX_SEC_STAT]
+    bclr r0, r0, AUX_SEC_STAT_BIT_SSC
+    sflag r0
 #else
-	lr r0, [AUX_STATUS32]
-	bclr r0, r0, AUX_STATUS_BIT_SC
-	kflag r0
+    lr r0, [AUX_STATUS32]
+    bclr r0, r0, AUX_STATUS_BIT_SC
+    kflag r0
 #endif
 #endif
 irq_handler_1:
-	PUSH	blink
+    PUSH    blink
 
-	jl 	rt_interrupt_enter
+    jl  rt_interrupt_enter
 
-	lr	r0, [AUX_IRQ_CAUSE]
-	sr	r0, [AUX_IRQ_SELECT]
-	mov	r1, exc_int_handler_table
-	ld.as	r2, [r1, r0]	/* r2 = exc_int_handler_table + irqno *4 */
+    lr  r0, [AUX_IRQ_CAUSE]
+    sr  r0, [AUX_IRQ_SELECT]
+    mov r1, exc_int_handler_table
+    ld.as   r2, [r1, r0]    /* r2 = exc_int_handler_table + irqno *4 */
 /* handle software triggered interrupt */
-	lr	r3, [AUX_IRQ_HINT]
-	cmp	r3, r0
-	bne.d irq_hint_handled
-	xor	r3, r3, r3
-	sr	r3, [AUX_IRQ_HINT]
+    lr  r3, [AUX_IRQ_HINT]
+    cmp r3, r0
+    bne.d irq_hint_handled
+    xor r3, r3, r3
+    sr  r3, [AUX_IRQ_HINT]
 irq_hint_handled:
-	lr	r3, [AUX_IRQ_PRIORITY]
-	PUSH	r3		/* save irq priority */
+    lr  r3, [AUX_IRQ_PRIORITY]
+    PUSH    r3      /* save irq priority */
 
-	jl	[r2]		/* jump to interrupt handler */
-	jl	rt_interrupt_leave
+    jl  [r2]        /* jump to interrupt handler */
+    jl  rt_interrupt_leave
 ret_int:
-	clri			/* disable interrupt */
-	POP	r3		/* irq priority */
-	POP	sp
-	mov	r1, exc_nest_count
-	ld	r0, [r1]
-	sub	r0, r0, 1
-	st	r0, [r1]
+    clri            /* disable interrupt */
+    POP r3      /* irq priority */
+    POP sp
+    mov r1, exc_nest_count
+    ld  r0, [r1]
+    sub r0, r0, 1
+    st  r0, [r1]
 /* if there are multi-bits set in IRQ_ACT, it's still in nest interrupt */
-	lr	r0, [AUX_IRQ_CAUSE]
-	sr	r0, [AUX_IRQ_SELECT]
-	lr 	r3, [AUX_IRQ_PRIORITY]
-	lr	r1, [AUX_IRQ_ACT]
-	bclr	r2, r1, r3
-	brne	r2, 0, ret_int_1
-
-	ld	r0, [context_switch_reqflg]
-	brne	r0, 0, ret_int_2
-ret_int_1:	/* return from non-task context */
-	INTERRUPT_EPILOGUE
-	rtie
+    lr  r0, [AUX_IRQ_CAUSE]
+    sr  r0, [AUX_IRQ_SELECT]
+    lr  r3, [AUX_IRQ_PRIORITY]
+    lr  r1, [AUX_IRQ_ACT]
+    bclr    r2, r1, r3
+    brne    r2, 0, ret_int_1
+
+    ld  r0, [context_switch_reqflg]
+    brne    r0, 0, ret_int_2
+ret_int_1:  /* return from non-task context */
+    INTERRUPT_EPILOGUE
+    rtie
 /* there is a dispatch request */
 ret_int_2:
-	/* clear dispatch request */
-	mov	r0, 0
-	st	r0, [context_switch_reqflg]
+    /* clear dispatch request */
+    mov r0, 0
+    st  r0, [context_switch_reqflg]
 
-	/* interrupt return by SW */
-	lr	r10, [AUX_IRQ_ACT]
-	PUSH	r10
-	bclr	r10, r10, r3	/* clear related bits in IRQ_ACT */
-	sr	r10, [AUX_IRQ_ACT]
+    /* interrupt return by SW */
+    lr  r10, [AUX_IRQ_ACT]
+    PUSH    r10
+    bclr    r10, r10, r3    /* clear related bits in IRQ_ACT */
+    sr  r10, [AUX_IRQ_ACT]
 
-	SAVE_CALLEE_REGS	/* save callee save registers */
-	mov	r1, ret_int_r	/* save return address */
-	PUSH	r1
+    SAVE_CALLEE_REGS    /* save callee save registers */
+    mov r1, ret_int_r   /* save return address */
+    PUSH    r1
 
-	ld 	r0, [rt_interrupt_from_thread]
-	ld 	r1, [rt_interrupt_to_thread]
-	b	dispatcher
+    ld  r0, [rt_interrupt_from_thread]
+    ld  r1, [rt_interrupt_to_thread]
+    b   dispatcher
 
 ret_int_r:
-	RESTORE_CALLEE_REGS
-	/* recover AUX_IRQ_ACT to restore the interrup status */
-	POPAX	AUX_IRQ_ACT
-	INTERRUPT_EPILOGUE
-	rtie
+    RESTORE_CALLEE_REGS
+    /* recover AUX_IRQ_ACT to restore the interrup status */
+    POPAX   AUX_IRQ_ACT
+    INTERRUPT_EPILOGUE
+    rtie
 
 /****** entry for fast irq exception handling ******/
-	.global exc_entry_firq
-	.weak exc_entry_firq
-	.align 4
+    .global exc_entry_firq
+    .weak exc_entry_firq
+    .align 4
 exc_entry_firq:
-	SAVE_FIQ_EXC_REGS
+    SAVE_FIQ_EXC_REGS
 
-	lr	r0, [AUX_IRQ_CAUSE]
-	mov	r1, exc_int_handler_table
+    lr  r0, [AUX_IRQ_CAUSE]
+    mov r1, exc_int_handler_table
 /* r2 = _kernel_exc_tbl + irqno *4 */
-	ld.as	r2, [r1, r0]
+    ld.as   r2, [r1, r0]
 
 /* for the case of software triggered interrupt */
-	lr	r3, [AUX_IRQ_HINT]
-	cmp	r3, r0
-	bne.d	firq_hint_handled
-	xor	r3, r3, r3
-	sr	r3, [AUX_IRQ_HINT]
+    lr  r3, [AUX_IRQ_HINT]
+    cmp r3, r0
+    bne.d   firq_hint_handled
+    xor r3, r3, r3
+    sr  r3, [AUX_IRQ_HINT]
 firq_hint_handled:
 /* jump to interrupt handler */
-	mov	r0, sp
-	jl	[r2]
+    mov r0, sp
+    jl  [r2]
 
 firq_return:
-	RESTORE_FIQ_EXC_REGS
-	rtie
+    RESTORE_FIQ_EXC_REGS
+    rtie

+ 38 - 38
libcpu/arm/AT91SAM7S/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -8,25 +8,25 @@
  * 2006-03-13     Bernard      first version
  */
 
-#define NOINT			0xc0
+#define NOINT           0xc0
 
 /*
  * rt_base_t rt_hw_interrupt_disable()/*
  */
 .globl rt_hw_interrupt_disable
 rt_hw_interrupt_disable:
-	mrs r0, cpsr
-	orr r1, r0, #NOINT
-	msr cpsr_c, r1
-	mov pc, lr
+    mrs r0, cpsr
+    orr r1, r0, #NOINT
+    msr cpsr_c, r1
+    mov pc, lr
 
 /*
  * void rt_hw_interrupt_enable(rt_base_t level)/*
  */
 .globl rt_hw_interrupt_enable
 rt_hw_interrupt_enable:
-	msr cpsr, r0
-	mov pc, lr
+    msr cpsr, r0
+    mov pc, lr
 
 /*
  * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)/*
@@ -35,23 +35,23 @@ rt_hw_interrupt_enable:
  */
 .globl rt_hw_context_switch
 rt_hw_context_switch:
-	stmfd	sp!, {lr}			/* push pc (lr should be pushed in place of PC) */
-	stmfd	sp!, {r0-r12, lr}	/* push lr & register file */
+    stmfd   sp!, {lr}           /* push pc (lr should be pushed in place of PC) */
+    stmfd   sp!, {r0-r12, lr}   /* push lr & register file */
 
-	mrs		r4, cpsr
-	stmfd	sp!, {r4}			/* push cpsr */
-	mrs		r4, spsr
-	stmfd	sp!, {r4}			/* push spsr */
+    mrs     r4, cpsr
+    stmfd   sp!, {r4}           /* push cpsr */
+    mrs     r4, spsr
+    stmfd   sp!, {r4}           /* push spsr */
 
-	str		sp, [r0]			/* store sp in preempted tasks TCB */
-	ldr		sp, [r1]			/* get new task stack pointer */
+    str     sp, [r0]            /* store sp in preempted task's TCB */
+    ldr     sp, [r1]            /* get new task stack pointer */
 
-	ldmfd	sp!, {r4}			/* pop new task spsr */
-	msr		spsr_cxsf, r4
-	ldmfd	sp!, {r4}			/* pop new task cpsr */
-	msr		cpsr_cxsf, r4
+    ldmfd   sp!, {r4}           /* pop new task spsr */
+    msr     spsr_cxsf, r4
+    ldmfd   sp!, {r4}           /* pop new task cpsr */
+    msr     cpsr_cxsf, r4
 
-	ldmfd	sp!, {r0-r12, lr, pc}	/* pop new task r0-r12, lr & pc */
+    ldmfd   sp!, {r0-r12, lr, pc}   /* pop new task r0-r12, lr & pc */
 
 /*
  * void rt_hw_context_switch_to(rt_uint32 to)/*
@@ -59,14 +59,14 @@ rt_hw_context_switch:
  */
 .globl rt_hw_context_switch_to
 rt_hw_context_switch_to:
-	ldr		sp, [r0]			/* get new task stack pointer */
+    ldr     sp, [r0]            /* get new task stack pointer */
 
-	ldmfd	sp!, {r4}			/* pop new task spsr */
-	msr		spsr_cxsf, r4
-	ldmfd	sp!, {r4}			/* pop new task cpsr */
-	msr		cpsr_cxsf, r4
+    ldmfd   sp!, {r4}           /* pop new task spsr */
+    msr     spsr_cxsf, r4
+    ldmfd   sp!, {r4}           /* pop new task cpsr */
+    msr     cpsr_cxsf, r4
 
-	ldmfd	sp!, {r0-r12, lr, pc}	/* pop new task r0-r12, lr & pc */
+    ldmfd   sp!, {r0-r12, lr, pc}   /* pop new task r0-r12, lr & pc */
 
 /*
  * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)/*
@@ -76,15 +76,15 @@ rt_hw_context_switch_to:
 .globl rt_interrupt_to_thread
 .globl rt_hw_context_switch_interrupt
 rt_hw_context_switch_interrupt:
-	ldr		r2, =rt_thread_switch_interrupt_flag
-	ldr		r3, [r2]
-	cmp		r3, #1
-	beq		_reswitch
-	mov		r3, #1							/* set rt_thread_switch_interrupt_flag to 1 */
-	str		r3, [r2]
-	ldr		r2, =rt_interrupt_from_thread	/* set rt_interrupt_from_thread */
-	str		r0, [r2]
+    ldr     r2, =rt_thread_switch_interrupt_flag
+    ldr     r3, [r2]
+    cmp     r3, #1
+    beq     _reswitch
+    mov     r3, #1                          /* set rt_thread_switch_interrupt_flag to 1 */
+    str     r3, [r2]
+    ldr     r2, =rt_interrupt_from_thread   /* set rt_interrupt_from_thread */
+    str     r0, [r2]
 _reswitch:
-	ldr		r2, =rt_interrupt_to_thread		/* set rt_interrupt_to_thread */
-	str		r1, [r2]
-	mov		pc, lr
+    ldr     r2, =rt_interrupt_to_thread     /* set rt_interrupt_to_thread */
+    str     r1, [r2]
+    mov     pc, lr

+ 33 - 33
libcpu/arm/AT91SAM7S/context_rvds.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -8,7 +8,7 @@
  * 2009-01-20     Bernard      first version
  */
 
-NOINT	EQU		0xc0	; disable interrupt in psr
+NOINT   EQU     0xc0    ; disable interrupt in psr
 
     AREA |.text|, CODE, READONLY, ALIGN=2
     ARM
@@ -18,21 +18,21 @@ NOINT	EQU		0xc0	; disable interrupt in psr
 ;/*
 ; * rt_base_t rt_hw_interrupt_disable();
 ; */
-rt_hw_interrupt_disable	PROC
+rt_hw_interrupt_disable PROC
     EXPORT rt_hw_interrupt_disable
     MRS r0, cpsr
     ORR r1, r0, #NOINT
     MSR cpsr_c, r1
-    BX	lr
+    BX  lr
     ENDP
 
 ;/*
 ; * void rt_hw_interrupt_enable(rt_base_t level);
 ; */
-rt_hw_interrupt_enable	PROC
+rt_hw_interrupt_enable  PROC
     EXPORT rt_hw_interrupt_enable
     MSR cpsr_c, r0
-    BX	lr
+    BX  lr
     ENDP
 
 ;/*
@@ -40,41 +40,41 @@ rt_hw_interrupt_enable	PROC
 ; * r0 --> from
 ; * r1 --> to
 ; */
-rt_hw_context_switch	PROC
+rt_hw_context_switch    PROC
     EXPORT rt_hw_context_switch
-    STMFD	sp!, {lr}			; push pc (lr should be pushed in place of PC)
-    STMFD	sp!, {r0-r12, lr}	; push lr & register file
+    STMFD   sp!, {lr}           ; push pc (lr should be pushed in place of PC)
+    STMFD   sp!, {r0-r12, lr}   ; push lr & register file
 
-    MRS		r4, cpsr
-    STMFD	sp!, {r4}			; push cpsr
-    MRS		r4, spsr
-    STMFD	sp!, {r4}			; push spsr
+    MRS     r4, cpsr
+    STMFD   sp!, {r4}           ; push cpsr
+    MRS     r4, spsr
+    STMFD   sp!, {r4}           ; push spsr
 
-    STR	sp, [r0]				; store sp in preempted tasks TCB
-    LDR	sp, [r1]				; get new task stack pointer
+    STR sp, [r0]                ; store sp in preempted tasks TCB
+    LDR sp, [r1]                ; get new task stack pointer
 
-    LDMFD	sp!, {r4}			; pop new task spsr
-    MSR	spsr_cxsf, r4
-    LDMFD	sp!, {r4}			; pop new task cpsr
-    MSR	cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR cpsr_cxsf, r4
 
-    LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
+    LDMFD   sp!, {r0-r12, lr, pc}   ; pop new task r0-r12, lr & pc
     ENDP
 
 ;/*
 ; * void rt_hw_context_switch_to(rt_uint32 to);
 ; * r0 --> to
 ; */
-rt_hw_context_switch_to	PROC
+rt_hw_context_switch_to PROC
     EXPORT rt_hw_context_switch_to
-    LDR	sp, [r0]				; get new task stack pointer
+    LDR sp, [r0]                ; get new task stack pointer
 
-    LDMFD	sp!, {r4}			; pop new task spsr
-    MSR	spsr_cxsf, r4
-    LDMFD	sp!, {r4}			; pop new task cpsr
-    MSR	cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR cpsr_cxsf, r4
 
-    LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
+    LDMFD   sp!, {r0-r12, lr, pc}   ; pop new task r0-r12, lr & pc
     ENDP
 
 ;/*
@@ -84,20 +84,20 @@ rt_hw_context_switch_to	PROC
     IMPORT rt_interrupt_from_thread
     IMPORT rt_interrupt_to_thread
 
-rt_hw_context_switch_interrupt	PROC
+rt_hw_context_switch_interrupt  PROC
     EXPORT rt_hw_context_switch_interrupt
     LDR r2, =rt_thread_switch_interrupt_flag
     LDR r3, [r2]
     CMP r3, #1
     BEQ _reswitch
-    MOV r3, #1							; set rt_thread_switch_interrupt_flag to 1
+    MOV r3, #1                          ; set rt_thread_switch_interrupt_flag to 1
     STR r3, [r2]
-    LDR r2, =rt_interrupt_from_thread	; set rt_interrupt_from_thread
+    LDR r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
     STR r0, [r2]
 _reswitch
-    LDR r2, =rt_interrupt_to_thread		; set rt_interrupt_to_thread
+    LDR r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
     STR r1, [r2]
-    BX	lr
+    BX  lr
     ENDP
 
-    END
+    END

+ 181 - 181
libcpu/arm/AT91SAM7S/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -8,151 +8,151 @@
  * 2006-08-31     Bernard      first version
  */
 
-	/* Internal Memory Base Addresses */
-	.equ    FLASH_BASE,     0x00100000   
-	.equ    RAM_BASE,       0x00200000
-
-	/* Stack Configuration */
-	.equ    TOP_STACK,      0x00204000
-	.equ    UND_STACK_SIZE, 0x00000100
-	.equ    SVC_STACK_SIZE, 0x00000400
-	.equ    ABT_STACK_SIZE, 0x00000100
-	.equ    FIQ_STACK_SIZE, 0x00000100
-	.equ    IRQ_STACK_SIZE, 0x00000100
-	.equ    USR_STACK_SIZE, 0x00000004
-
-	/* ARM architecture definitions */
-	.equ	MODE_USR, 0x10
-	.equ	MODE_FIQ, 0x11
-	.equ	MODE_IRQ, 0x12
-	.equ	MODE_SVC, 0x13
-	.equ	MODE_ABT, 0x17
-	.equ	MODE_UND, 0x1B
-	.equ	MODE_SYS, 0x1F
-
-	.equ    I_BIT, 0x80    /* when this bit is set, IRQ is disabled */
-	.equ    F_BIT, 0x40    /* when this bit is set, FIQ is disabled */
+    /* Internal Memory Base Addresses */
+    .equ    FLASH_BASE,     0x00100000
+    .equ    RAM_BASE,       0x00200000
+
+    /* Stack Configuration */
+    .equ    TOP_STACK,      0x00204000
+    .equ    UND_STACK_SIZE, 0x00000100
+    .equ    SVC_STACK_SIZE, 0x00000400
+    .equ    ABT_STACK_SIZE, 0x00000100
+    .equ    FIQ_STACK_SIZE, 0x00000100
+    .equ    IRQ_STACK_SIZE, 0x00000100
+    .equ    USR_STACK_SIZE, 0x00000004
+
+    /* ARM architecture definitions */
+    .equ    MODE_USR, 0x10
+    .equ    MODE_FIQ, 0x11
+    .equ    MODE_IRQ, 0x12
+    .equ    MODE_SVC, 0x13
+    .equ    MODE_ABT, 0x17
+    .equ    MODE_UND, 0x1B
+    .equ    MODE_SYS, 0x1F
+
+    .equ    I_BIT, 0x80    /* when this bit is set, IRQ is disabled */
+    .equ    F_BIT, 0x40    /* when this bit is set, FIQ is disabled */
 
 .section .init, "ax"
 .code 32
 .align 0
 .globl _start
 _start:
-	b	reset
-	ldr	pc, _vector_undef
-	ldr	pc, _vector_swi
-	ldr	pc, _vector_pabt
-	ldr	pc, _vector_dabt
-	nop							/* reserved vector */
-	ldr	pc, _vector_irq
-	ldr	pc, _vector_fiq
-
-_vector_undef:	.word vector_undef
-_vector_swi:	.word vector_swi
-_vector_pabt:	.word vector_pabt
-_vector_dabt:	.word vector_dabt
-_vector_resv:	.word vector_resv
-_vector_irq:	.word vector_irq
-_vector_fiq:	.word vector_fiq
+    b   reset
+    ldr pc, _vector_undef
+    ldr pc, _vector_swi
+    ldr pc, _vector_pabt
+    ldr pc, _vector_dabt
+    nop                         /* reserved vector */
+    ldr pc, _vector_irq
+    ldr pc, _vector_fiq
+
+_vector_undef:  .word vector_undef
+_vector_swi:    .word vector_swi
+_vector_pabt:   .word vector_pabt
+_vector_dabt:   .word vector_dabt
+_vector_resv:   .word vector_resv
+_vector_irq:    .word vector_irq
+_vector_fiq:    .word vector_fiq
 
 /*
  * rtthread bss start and end
  * which are defined in linker script
  */
 .globl _bss_start
-_bss_start:	.word __bss_start
+_bss_start: .word __bss_start
 .globl _bss_end
-_bss_end:	.word __bss_end
+_bss_end:   .word __bss_end
 
 /* the system entry */
 reset:
-	/* disable watchdog */
-	ldr r0, =0xFFFFFD40
-	ldr r1, =0x00008000
-	str r1, [r0, #0x04]
-	
-	/* enable the main oscillator */
-	ldr r0, =0xFFFFFC00
-	ldr r1, =0x00000601
-	str r1, [r0, #0x20]
-	
-	/* wait for main oscillator to stabilize */
+    /* disable watchdog */
+    ldr r0, =0xFFFFFD40
+    ldr r1, =0x00008000
+    str r1, [r0, #0x04]
+
+    /* enable the main oscillator */
+    ldr r0, =0xFFFFFC00
+    ldr r1, =0x00000601
+    str r1, [r0, #0x20]
+
+    /* wait for main oscillator to stabilize */
 moscs_loop:
-	ldr r2, [r0, #0x68]
-	ands r2, r2, #1
-	beq moscs_loop
-	
-	/* set up the PLL */
-	ldr r1, =0x00191C05
-	str r1, [r0, #0x2C]
-	
-	/* wait for PLL to lock */
+    ldr r2, [r0, #0x68]
+    ands r2, r2, #1
+    beq moscs_loop
+
+    /* set up the PLL */
+    ldr r1, =0x00191C05
+    str r1, [r0, #0x2C]
+
+    /* wait for PLL to lock */
 pll_loop:
-	ldr r2, [r0, #0x68]
-	ands r2, r2, #0x04
-	beq pll_loop
-	
-	/* select clock */
-	ldr r1, =0x00000007
-	str r1, [r0, #0x30]
-	
-	/* setup stack for each mode */
-	ldr r0, =TOP_STACK
-	
-	/* set stack */
-	/* undefined instruction mode */
-	msr cpsr_c, #MODE_UND|I_BIT|F_BIT
-	mov sp, r0
-	sub r0, r0, #UND_STACK_SIZE
-	
-	/* abort mode */
-	msr cpsr_c, #MODE_ABT|I_BIT|F_BIT
-	mov sp, r0
-	sub r0, r0, #ABT_STACK_SIZE
-	
-	/* FIQ mode */
-	msr cpsr_c, #MODE_FIQ|I_BIT|F_BIT
-	mov sp, r0
-	sub r0, r0, #FIQ_STACK_SIZE
-	
-	/* IRQ mode */
-	msr cpsr_c, #MODE_IRQ|I_BIT|F_BIT
-	mov sp, r0
-	sub r0, r0, #IRQ_STACK_SIZE
-	
-	/* supervisor mode */
-	msr cpsr_c, #MODE_SVC
-	mov sp, r0
-	
+    ldr r2, [r0, #0x68]
+    ands r2, r2, #0x04
+    beq pll_loop
+
+    /* select clock */
+    ldr r1, =0x00000007
+    str r1, [r0, #0x30]
+
+    /* setup stack for each mode */
+    ldr r0, =TOP_STACK
+
+    /* set stack */
+    /* undefined instruction mode */
+    msr cpsr_c, #MODE_UND|I_BIT|F_BIT
+    mov sp, r0
+    sub r0, r0, #UND_STACK_SIZE
+
+    /* abort mode */
+    msr cpsr_c, #MODE_ABT|I_BIT|F_BIT
+    mov sp, r0
+    sub r0, r0, #ABT_STACK_SIZE
+
+    /* FIQ mode */
+    msr cpsr_c, #MODE_FIQ|I_BIT|F_BIT
+    mov sp, r0
+    sub r0, r0, #FIQ_STACK_SIZE
+
+    /* IRQ mode */
+    msr cpsr_c, #MODE_IRQ|I_BIT|F_BIT
+    mov sp, r0
+    sub r0, r0, #IRQ_STACK_SIZE
+
+    /* supervisor mode */
+    msr cpsr_c, #MODE_SVC
+    mov sp, r0
+
 #ifdef __FLASH_BUILD__
-	/* Relocate .data section (Copy from ROM to RAM) */
-	ldr     r1, =_etext
-	ldr     r2, =_data
-	ldr     r3, =_edata
+    /* Relocate .data section (Copy from ROM to RAM) */
+    ldr     r1, =_etext
+    ldr     r2, =_data
+    ldr     r3, =_edata
 data_loop:
-	cmp     r2, r3
-	ldrlo   r0, [r1], #4
-	strlo   r0, [r2], #4
-	blo     data_loop
+    cmp     r2, r3
+    ldrlo   r0, [r1], #4
+    strlo   r0, [r2], #4
+    blo     data_loop
 #else
-	/* remap SRAM to 0x0000 */
-	ldr r0, =0xFFFFFF00
-	mov r1, #0x01
-	str r1, [r0]
+    /* remap SRAM to 0x0000 */
+    ldr r0, =0xFFFFFF00
+    mov r1, #0x01
+    str r1, [r0]
 #endif
-	
-	/* mask all IRQs */
-	ldr	r1, =0xFFFFF124
-	ldr	r0, =0XFFFFFFFF
-	str	r0, [r1]
-	
-	/* start RT-Thread Kernel */
-	ldr	pc, _rtthread_startup
-	
+
+    /* mask all IRQs */
+    ldr r1, =0xFFFFF124
+    ldr r0, =0XFFFFFFFF
+    str r0, [r1]
+
+    /* start RT-Thread Kernel */
+    ldr pc, _rtthread_startup
+
 _rtthread_startup: .word rtthread_startup
 
 /* exception handlers */
-vector_undef: b	vector_undef
+vector_undef: b vector_undef
 vector_swi  : b vector_swi
 vector_pabt : b vector_pabt
 vector_dabt : b vector_dabt
@@ -164,70 +164,70 @@ vector_resv : b vector_resv
 .globl rt_interrupt_from_thread
 .globl rt_interrupt_to_thread
 vector_irq:
-	stmfd	sp!, {r0-r12,lr}
-	bl	rt_interrupt_enter
-	bl	rt_hw_trap_irq
-	bl	rt_interrupt_leave
-
-	/* 
-	 * if rt_thread_switch_interrupt_flag set, jump to
-	 * rt_hw_context_switch_interrupt_do and don't return
-	 */
-	ldr	r0, =rt_thread_switch_interrupt_flag
-	ldr	r1, [r0]
-	cmp	r1, #1
-	beq	rt_hw_context_switch_interrupt_do
-
-	ldmfd	sp!, {r0-r12,lr}
-	subs	pc, lr, #4
+    stmfd   sp!, {r0-r12,lr}
+    bl  rt_interrupt_enter
+    bl  rt_hw_trap_irq
+    bl  rt_interrupt_leave
+
+    /*
+     * if rt_thread_switch_interrupt_flag set, jump to
+     * rt_hw_context_switch_interrupt_do and don't return
+     */
+    ldr r0, =rt_thread_switch_interrupt_flag
+    ldr r1, [r0]
+    cmp r1, #1
+    beq rt_hw_context_switch_interrupt_do
+
+    ldmfd   sp!, {r0-r12,lr}
+    subs    pc, lr, #4
 
 vector_fiq:
-	stmfd	sp!,{r0-r7,lr}
-	bl 	rt_hw_trap_fiq
-	ldmfd	sp!,{r0-r7,lr}
-	subs	pc,lr,#4
+    stmfd   sp!,{r0-r7,lr}
+    bl  rt_hw_trap_fiq
+    ldmfd   sp!,{r0-r7,lr}
+    subs    pc,lr,#4
 
 /*
  * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
  */
 rt_hw_context_switch_interrupt_do:
-	mov		r1,  #0				/* clear flag */
-	str		r1,  [r0]
-
-	ldmfd	sp!, {r0-r12,lr}	/* reload saved registers */
-	stmfd	sp!, {r0-r3}		/* save r0-r3 */
-	mov		r1,  sp
-	add		sp,  sp, #16		/* restore sp */
-	sub		r2,  lr, #4			/* save old task's pc to r2 */
-
-	mrs		r3,  spsr			/* disable interrupt */
-	orr		r0,  r3, #I_BIT|F_BIT
-	msr		spsr_c, r0
-
-	ldr		r0,  =.+8			/* switch to interrupted task's stack */
-	movs	pc,  r0
-
-	stmfd	sp!, {r2}			/* push old task's pc */
-	stmfd	sp!, {r4-r12,lr}	/* push old task's lr,r12-r4 */
-	mov		r4,  r1				/* Special optimised code below */
-	mov		r5,  r3
-	ldmfd	r4!, {r0-r3}
-	stmfd	sp!, {r0-r3}		/* push old task's r3-r0 */
-	stmfd	sp!, {r5}			/* push old task's psr */
-	mrs		r4,  spsr
-	stmfd	sp!, {r4}			/* push old task's spsr */
-
-	ldr		r4,  =rt_interrupt_from_thread
-	ldr		r5,  [r4]
-	str		sp,  [r5]			/* store sp in preempted tasks's TCB */
-
-	ldr		r6,  =rt_interrupt_to_thread
-	ldr		r6,  [r6]
-	ldr		sp,  [r6]			/* get new task's stack pointer */
-
-	ldmfd	sp!, {r4}			/* pop new task's spsr */
-	msr		SPSR_cxsf, r4
-	ldmfd	sp!, {r4}			/* pop new task's psr */
-	msr		CPSR_cxsf, r4
-
-	ldmfd	sp!, {r0-r12,lr,pc}	/* pop new task's r0-r12,lr & pc */
+    mov     r1,  #0             /* clear flag */
+    str     r1,  [r0]
+
+    ldmfd   sp!, {r0-r12,lr}    /* reload saved registers */
+    stmfd   sp!, {r0-r3}        /* save r0-r3 */
+    mov     r1,  sp
+    add     sp,  sp, #16        /* restore sp */
+    sub     r2,  lr, #4         /* save old task's pc to r2 */
+
+    mrs     r3,  spsr           /* disable interrupt */
+    orr     r0,  r3, #I_BIT|F_BIT
+    msr     spsr_c, r0
+
+    ldr     r0,  =.+8           /* switch to interrupted task's stack */
+    movs    pc,  r0
+
+    stmfd   sp!, {r2}           /* push old task's pc */
+    stmfd   sp!, {r4-r12,lr}    /* push old task's lr,r12-r4 */
+    mov     r4,  r1             /* Special optimised code below */
+    mov     r5,  r3
+    ldmfd   r4!, {r0-r3}
+    stmfd   sp!, {r0-r3}        /* push old task's r3-r0 */
+    stmfd   sp!, {r5}           /* push old task's psr */
+    mrs     r4,  spsr
+    stmfd   sp!, {r4}           /* push old task's spsr */
+
+    ldr     r4,  =rt_interrupt_from_thread
+    ldr     r5,  [r4]
+    str     sp,  [r5]           /* store sp in preempted task's TCB */
+
+    ldr     r6,  =rt_interrupt_to_thread
+    ldr     r6,  [r6]
+    ldr     sp,  [r6]           /* get new task's stack pointer */
+
+    ldmfd   sp!, {r4}           /* pop new task's spsr */
+    msr     SPSR_cxsf, r4
+    ldmfd   sp!, {r4}           /* pop new task's psr */
+    msr     CPSR_cxsf, r4
+
+    ldmfd   sp!, {r0-r12,lr,pc} /* pop new task's r0-r12,lr & pc */

+ 38 - 38
libcpu/arm/AT91SAM7X/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -13,25 +13,25 @@
  */
 /*@{*/
 
-#define NOINT			0xc0
+#define NOINT           0xc0
 
 /*
  * rt_base_t rt_hw_interrupt_disable();
  */
 .globl rt_hw_interrupt_disable
 rt_hw_interrupt_disable:
-	mrs r0, cpsr
-	orr r1, r0, #NOINT
-	msr cpsr_c, r1
-	mov pc, lr
+    mrs r0, cpsr
+    orr r1, r0, #NOINT
+    msr cpsr_c, r1
+    mov pc, lr
 
 /*
  * void rt_hw_interrupt_enable(rt_base_t level);
  */
 .globl rt_hw_interrupt_enable
 rt_hw_interrupt_enable:
-	msr cpsr, r0
-	mov pc, lr
+    msr cpsr, r0
+    mov pc, lr
 
 /*
  * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
@@ -40,23 +40,23 @@ rt_hw_interrupt_enable:
  */
 .globl rt_hw_context_switch
 rt_hw_context_switch:
-	stmfd	sp!, {lr}		@ push pc (lr should be pushed in place of PC)
-	stmfd	sp!, {r0-r12, lr}	@ push lr & register file
+    stmfd   sp!, {lr}       @ push pc (lr should be pushed in place of PC)
+    stmfd   sp!, {r0-r12, lr}   @ push lr & register file
 
-	mrs	r4, cpsr
-	stmfd	sp!, {r4}		@ push cpsr
-	mrs	r4, spsr
-	stmfd	sp!, {r4}		@ push spsr
+    mrs r4, cpsr
+    stmfd   sp!, {r4}       @ push cpsr
+    mrs r4, spsr
+    stmfd   sp!, {r4}       @ push spsr
 
-	str	sp, [r0]			@ store sp in preempted tasks TCB
-	ldr	sp, [r1]			@ get new task stack pointer
+    str sp, [r0]            @ store sp in preempted tasks TCB
+    ldr sp, [r1]            @ get new task stack pointer
 
-	ldmfd	sp!, {r4}		@ pop new task spsr
-	msr	spsr_cxsf, r4
-	ldmfd	sp!, {r4}		@ pop new task cpsr
-	msr	cpsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task spsr
+    msr spsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task cpsr
+    msr cpsr_cxsf, r4
 
-	ldmfd	sp!, {r0-r12, lr, pc}	@ pop new task r0-r12, lr & pc
+    ldmfd   sp!, {r0-r12, lr, pc}   @ pop new task r0-r12, lr & pc
 
 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
@@ -64,14 +64,14 @@ rt_hw_context_switch:
  */
 .globl rt_hw_context_switch_to
 rt_hw_context_switch_to:
-	ldr	sp, [r0]		@ get new task stack pointer
+    ldr sp, [r0]        @ get new task stack pointer
 
-	ldmfd	sp!, {r4}		@ pop new task spsr
-	msr	spsr_cxsf, r4
-	ldmfd	sp!, {r4}		@ pop new task cpsr
-	msr	cpsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task spsr
+    msr spsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task cpsr
+    msr cpsr_cxsf, r4
 
-	ldmfd	sp!, {r0-r12, lr, pc}	@ pop new task r0-r12, lr & pc
+    ldmfd   sp!, {r0-r12, lr, pc}   @ pop new task r0-r12, lr & pc
 
 /*
  * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
@@ -81,15 +81,15 @@ rt_hw_context_switch_to:
 .globl rt_interrupt_to_thread
 .globl rt_hw_context_switch_interrupt
 rt_hw_context_switch_interrupt:
-	ldr r2, =rt_thread_switch_interrupt_flag
-	ldr r3, [r2]
-	cmp r3, #1
-	beq _reswitch
-	mov r3, #1				@ set rt_thread_switch_interrupt_flag to 1
-	str r3, [r2]
-	ldr r2, =rt_interrupt_from_thread	@ set rt_interrupt_from_thread
-	str r0, [r2]
+    ldr r2, =rt_thread_switch_interrupt_flag
+    ldr r3, [r2]
+    cmp r3, #1
+    beq _reswitch
+    mov r3, #1              @ set rt_thread_switch_interrupt_flag to 1
+    str r3, [r2]
+    ldr r2, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
+    str r0, [r2]
 _reswitch:
-	ldr r2, =rt_interrupt_to_thread		@ set rt_interrupt_to_thread
-	str r1, [r2]
-	mov pc, lr
+    ldr r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
+    str r1, [r2]
+    mov pc, lr

+ 61 - 61
libcpu/arm/AT91SAM7X/context_rvds.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -8,96 +8,96 @@
  * 2009-01-20     Bernard      first version
  */
 
-NOINT	EQU		0xc0	; disable interrupt in psr
+NOINT   EQU     0xc0    ; disable interrupt in psr
 
-	AREA |.text|, CODE, READONLY, ALIGN=2
-	ARM
-	REQUIRE8
-	PRESERVE8
+    AREA |.text|, CODE, READONLY, ALIGN=2
+    ARM
+    REQUIRE8
+    PRESERVE8
 
 ;/*
 ; * rt_base_t rt_hw_interrupt_disable();
 ; */
-rt_hw_interrupt_disable	PROC
-	EXPORT rt_hw_interrupt_disable
-	MRS r0, cpsr
-	ORR r1, r0, #NOINT
-	MSR cpsr_c, r1
-	BX	lr
-	ENDP
+rt_hw_interrupt_disable PROC
+    EXPORT rt_hw_interrupt_disable
+    MRS r0, cpsr
+    ORR r1, r0, #NOINT
+    MSR cpsr_c, r1
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_interrupt_enable(rt_base_t level);
 ; */
-rt_hw_interrupt_enable	PROC
-	EXPORT rt_hw_interrupt_enable
-	MSR cpsr_c, r0
-	BX	lr
-	ENDP
+rt_hw_interrupt_enable  PROC
+    EXPORT rt_hw_interrupt_enable
+    MSR cpsr_c, r0
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 ; * r0 --> from
 ; * r1 --> to
 ; */
-rt_hw_context_switch	PROC
-	EXPORT rt_hw_context_switch
-	STMFD	sp!, {lr}			; push pc (lr should be pushed in place of PC)
-	STMFD	sp!, {r0-r12, lr}	; push lr & register file
+rt_hw_context_switch    PROC
+    EXPORT rt_hw_context_switch
+    STMFD   sp!, {lr}           ; push pc (lr should be pushed in place of PC)
+    STMFD   sp!, {r0-r12, lr}   ; push lr & register file
 
-	MRS		r4, cpsr
-	STMFD	sp!, {r4}			; push cpsr
-	MRS		r4, spsr
-	STMFD	sp!, {r4}			; push spsr
+    MRS     r4, cpsr
+    STMFD   sp!, {r4}           ; push cpsr
+    MRS     r4, spsr
+    STMFD   sp!, {r4}           ; push spsr
 
-	STR	sp, [r0]				; store sp in preempted tasks TCB
-	LDR	sp, [r1]				; get new task stack pointer
+    STR sp, [r0]                ; store sp in preempted tasks TCB
+    LDR sp, [r1]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task spsr
-	MSR	spsr_cxsf, r4
-	LDMFD	sp!, {r4}			; pop new task cpsr
-	MSR	cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR cpsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}   ; pop new task r0-r12, lr & pc
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_to(rt_uint32 to);
 ; * r0 --> to
 ; */
-rt_hw_context_switch_to	PROC
-	EXPORT rt_hw_context_switch_to
-	LDR	sp, [r0]				; get new task stack pointer
+rt_hw_context_switch_to PROC
+    EXPORT rt_hw_context_switch_to
+    LDR sp, [r0]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task spsr
-	MSR	spsr_cxsf, r4
-	LDMFD	sp!, {r4}			; pop new task cpsr
-	MSR	cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR cpsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}   ; pop new task r0-r12, lr & pc
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 ; */
-	IMPORT rt_thread_switch_interrupt_flag
-	IMPORT rt_interrupt_from_thread
-	IMPORT rt_interrupt_to_thread
+    IMPORT rt_thread_switch_interrupt_flag
+    IMPORT rt_interrupt_from_thread
+    IMPORT rt_interrupt_to_thread
 
-rt_hw_context_switch_interrupt	PROC
-	EXPORT rt_hw_context_switch_interrupt
-	LDR r2, =rt_thread_switch_interrupt_flag
-	LDR r3, [r2]
-	CMP r3, #1
-	BEQ _reswitch
-	MOV r3, #1							; set rt_thread_switch_interrupt_flag to 1
-	STR r3, [r2]
-	LDR r2, =rt_interrupt_from_thread	; set rt_interrupt_from_thread
-	STR r0, [r2]
+rt_hw_context_switch_interrupt  PROC
+    EXPORT rt_hw_context_switch_interrupt
+    LDR r2, =rt_thread_switch_interrupt_flag
+    LDR r3, [r2]
+    CMP r3, #1
+    BEQ _reswitch
+    MOV r3, #1                          ; set rt_thread_switch_interrupt_flag to 1
+    STR r3, [r2]
+    LDR r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
+    STR r0, [r2]
 _reswitch
-	LDR r2, =rt_interrupt_to_thread		; set rt_interrupt_to_thread
-	STR r1, [r2]
-	BX	lr
-	ENDP
+    LDR r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
+    STR r1, [r2]
+    BX  lr
+    ENDP
 
-	END
+    END

+ 183 - 183
libcpu/arm/AT91SAM7X/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -8,145 +8,145 @@
  * 2006-08-31     Bernard      first version
  */
 
-	/* Internal Memory Base Addresses */
-	.equ    FLASH_BASE,     0x00100000   
-	.equ    RAM_BASE,       0x00200000
-
-	/* Stack Configuration */
-	.equ    TOP_STACK,      0x00204000
-	.equ    UND_STACK_SIZE, 0x00000100
-	.equ    SVC_STACK_SIZE, 0x00000400
-	.equ    ABT_STACK_SIZE, 0x00000100
-	.equ    FIQ_STACK_SIZE, 0x00000100
-	.equ    IRQ_STACK_SIZE, 0x00000100
-	.equ    USR_STACK_SIZE, 0x00000004
-
-	/* ARM architecture definitions */
-	.equ	MODE_USR, 0x10
-	.equ	MODE_FIQ, 0x11
-	.equ	MODE_IRQ, 0x12
-	.equ	MODE_SVC, 0x13
-	.equ	MODE_ABT, 0x17
-	.equ	MODE_UND, 0x1B
-	.equ	MODE_SYS, 0x1F
-
-	.equ    I_BIT, 0x80    /* when this bit is set, IRQ is disabled */
-	.equ    F_BIT, 0x40    /* when this bit is set, FIQ is disabled */
+    /* Internal Memory Base Addresses */
+    .equ    FLASH_BASE,     0x00100000
+    .equ    RAM_BASE,       0x00200000
+
+    /* Stack Configuration */
+    .equ    TOP_STACK,      0x00204000
+    .equ    UND_STACK_SIZE, 0x00000100
+    .equ    SVC_STACK_SIZE, 0x00000400
+    .equ    ABT_STACK_SIZE, 0x00000100
+    .equ    FIQ_STACK_SIZE, 0x00000100
+    .equ    IRQ_STACK_SIZE, 0x00000100
+    .equ    USR_STACK_SIZE, 0x00000004
+
+    /* ARM architecture definitions */
+    .equ    MODE_USR, 0x10
+    .equ    MODE_FIQ, 0x11
+    .equ    MODE_IRQ, 0x12
+    .equ    MODE_SVC, 0x13
+    .equ    MODE_ABT, 0x17
+    .equ    MODE_UND, 0x1B
+    .equ    MODE_SYS, 0x1F
+
+    .equ    I_BIT, 0x80    /* when this bit is set, IRQ is disabled */
+    .equ    F_BIT, 0x40    /* when this bit is set, FIQ is disabled */
 
 .section .init, "ax"
 .code 32
 .align 0
 .globl _start
 _start:
-	b	reset
-	ldr	pc, _vector_undef
-	ldr	pc, _vector_swi
-	ldr	pc, _vector_pabt
-	ldr	pc, _vector_dabt
-	nop							/* reserved vector */
-	ldr	pc, _vector_irq
-	ldr	pc, _vector_fiq
-
-_vector_undef:	.word vector_undef
-_vector_swi:	.word vector_swi
-_vector_pabt:	.word vector_pabt
-_vector_dabt:	.word vector_dabt
-_vector_resv:	.word vector_resv
-_vector_irq:	.word vector_irq
-_vector_fiq:	.word vector_fiq
+    b   reset
+    ldr pc, _vector_undef
+    ldr pc, _vector_swi
+    ldr pc, _vector_pabt
+    ldr pc, _vector_dabt
+    nop                         /* reserved vector */
+    ldr pc, _vector_irq
+    ldr pc, _vector_fiq
+
+_vector_undef:  .word vector_undef
+_vector_swi:    .word vector_swi
+_vector_pabt:   .word vector_pabt
+_vector_dabt:   .word vector_dabt
+_vector_resv:   .word vector_resv
+_vector_irq:    .word vector_irq
+_vector_fiq:    .word vector_fiq
 
 /*
  * rtthread bss start and end
  * which are defined in linker script
  */
 .globl _bss_start
-_bss_start:	.word __bss_start
+_bss_start: .word __bss_start
 .globl _bss_end
-_bss_end:	.word __bss_end
+_bss_end:   .word __bss_end
 
 /* the system entry */
 reset:
-	/* disable watchdog */
-	ldr r0, =0xFFFFFD40
-	ldr r1, =0x00008000
-	str r1, [r0, #0x04]
-	
-	/* enable the main oscillator */
-	ldr r0, =0xFFFFFC00
-	ldr r1, =0x00000601
-	str r1, [r0, #0x20]
-	
-	/* wait for main oscillator to stabilize */
+    /* disable watchdog */
+    ldr r0, =0xFFFFFD40
+    ldr r1, =0x00008000
+    str r1, [r0, #0x04]
+
+    /* enable the main oscillator */
+    ldr r0, =0xFFFFFC00
+    ldr r1, =0x00000601
+    str r1, [r0, #0x20]
+
+    /* wait for main oscillator to stabilize */
 moscs_loop:
-	ldr r2, [r0, #0x68]
-	ands r2, r2, #1
-	beq moscs_loop
-	
-	/* set up the PLL */
-	ldr r1, =0x00191C05
-	str r1, [r0, #0x2C]
-	
-	/* wait for PLL to lock */
+    ldr r2, [r0, #0x68]
+    ands r2, r2, #1
+    beq moscs_loop
+
+    /* set up the PLL */
+    ldr r1, =0x00191C05
+    str r1, [r0, #0x2C]
+
+    /* wait for PLL to lock */
 pll_loop:
-	ldr r2, [r0, #0x68]
-	ands r2, r2, #0x04
-	beq pll_loop
-	
-	/* select clock */
-	ldr r1, =0x00000007
-	str r1, [r0, #0x30]
-	
+    ldr r2, [r0, #0x68]
+    ands r2, r2, #0x04
+    beq pll_loop
+
+    /* select clock */
+    ldr r1, =0x00000007
+    str r1, [r0, #0x30]
+
 #ifdef __FLASH_BUILD__
-	/* copy exception vectors into internal sram */
+    /* copy exception vectors into internal sram */
     /*
-	mov r8, #RAM_BASE
-	ldr r9, =_start
-	ldmia r9!, {r0-r7}
-	stmia r8!, {r0-r7}
-	ldmia r9!, {r0-r6}
-	stmia r8!, {r0-r6}
+    mov r8, #RAM_BASE
+    ldr r9, =_start
+    ldmia r9!, {r0-r7}
+    stmia r8!, {r0-r7}
+    ldmia r9!, {r0-r6}
+    stmia r8!, {r0-r6}
     */
 #endif
-	
-	/* setup stack for each mode */
-	ldr r0, =TOP_STACK
-	
-	/* set stack */
-	/* undefined instruction mode */
-	msr cpsr_c, #MODE_UND|I_BIT|F_BIT
-	mov sp, r0
-	sub r0, r0, #UND_STACK_SIZE
-	
-	/* abort mode */
-	msr cpsr_c, #MODE_ABT|I_BIT|F_BIT
-	mov sp, r0
-	sub r0, r0, #ABT_STACK_SIZE
-	
-	/* FIQ mode */
-	msr cpsr_c, #MODE_FIQ|I_BIT|F_BIT
-	mov sp, r0
-	sub r0, r0, #FIQ_STACK_SIZE
-	
-	/* IRQ mode */
-	msr cpsr_c, #MODE_IRQ|I_BIT|F_BIT
-	mov sp, r0
-	sub r0, r0, #IRQ_STACK_SIZE
-	
-	/* supervisor mode */
-	msr cpsr_c, #MODE_SVC|I_BIT|F_BIT
-	mov sp, r0
-	
-	/* remap SRAM to 0x0000 */
-	/* 
-	ldr r0, =0xFFFFFF00
-	mov r1, #0x01
-	str r1, [r0]
-	*/
-	
-	/* mask all IRQs */
-	ldr	r1, =0xFFFFF124
-	ldr	r0, =0XFFFFFFFF
-	str	r0, [r1]
+
+    /* setup stack for each mode */
+    ldr r0, =TOP_STACK
+
+    /* set stack */
+    /* undefined instruction mode */
+    msr cpsr_c, #MODE_UND|I_BIT|F_BIT
+    mov sp, r0
+    sub r0, r0, #UND_STACK_SIZE
+
+    /* abort mode */
+    msr cpsr_c, #MODE_ABT|I_BIT|F_BIT
+    mov sp, r0
+    sub r0, r0, #ABT_STACK_SIZE
+
+    /* FIQ mode */
+    msr cpsr_c, #MODE_FIQ|I_BIT|F_BIT
+    mov sp, r0
+    sub r0, r0, #FIQ_STACK_SIZE
+
+    /* IRQ mode */
+    msr cpsr_c, #MODE_IRQ|I_BIT|F_BIT
+    mov sp, r0
+    sub r0, r0, #IRQ_STACK_SIZE
+
+    /* supervisor mode */
+    msr cpsr_c, #MODE_SVC|I_BIT|F_BIT
+    mov sp, r0
+
+    /* remap SRAM to 0x0000 */
+    /*
+    ldr r0, =0xFFFFFF00
+    mov r1, #0x01
+    str r1, [r0]
+    */
+
+    /* mask all IRQs */
+    ldr r1, =0xFFFFF124
+    ldr r0, =0XFFFFFFFF
+    str r0, [r1]
 
     /* copy .data to SRAM */
     ldr     r1, =_sidata            /* .data start in image */
@@ -187,14 +187,14 @@ ctor_loop:
     b       ctor_loop
 ctor_end:
 
-	
-	/* start RT-Thread Kernel */
-	ldr	pc, _rtthread_startup
-	
+
+    /* start RT-Thread Kernel */
+    ldr pc, _rtthread_startup
+
 _rtthread_startup: .word rtthread_startup
 
 /* exception handlers */
-vector_undef: b	vector_undef
+vector_undef: b vector_undef
 vector_swi  : b vector_swi
 vector_pabt : b vector_pabt
 vector_dabt : b vector_dabt
@@ -206,70 +206,70 @@ vector_resv : b vector_resv
 .globl rt_interrupt_from_thread
 .globl rt_interrupt_to_thread
 vector_irq:
-	stmfd	sp!, {r0-r12,lr}
-	bl	rt_interrupt_enter
-	bl	rt_hw_trap_irq
-	bl	rt_interrupt_leave
-
-	/* 
-	 * if rt_thread_switch_interrupt_flag set, jump to
-	 * rt_hw_context_switch_interrupt_do and don't return
-	 */
-	ldr	r0, =rt_thread_switch_interrupt_flag
-	ldr	r1, [r0]
-	cmp	r1, #1
-	beq	rt_hw_context_switch_interrupt_do
-
-	ldmfd	sp!, {r0-r12,lr}
-	subs	pc, lr, #4
+    stmfd   sp!, {r0-r12,lr}
+    bl  rt_interrupt_enter
+    bl  rt_hw_trap_irq
+    bl  rt_interrupt_leave
+
+    /*
+     * if rt_thread_switch_interrupt_flag set, jump to
+     * rt_hw_context_switch_interrupt_do and don't return
+     */
+    ldr r0, =rt_thread_switch_interrupt_flag
+    ldr r1, [r0]
+    cmp r1, #1
+    beq rt_hw_context_switch_interrupt_do
+
+    ldmfd   sp!, {r0-r12,lr}
+    subs    pc, lr, #4
 
 vector_fiq:
-	stmfd	sp!,{r0-r7,lr}
-	bl 	rt_hw_trap_fiq
-	ldmfd	sp!,{r0-r7,lr}
-	subs	pc,lr,#4
+    stmfd   sp!,{r0-r7,lr}
+    bl  rt_hw_trap_fiq
+    ldmfd   sp!,{r0-r7,lr}
+    subs    pc,lr,#4
 
 /*
  * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
  */
 rt_hw_context_switch_interrupt_do:
-	mov	r1,  #0				@ clear flag
-	str	r1,  [r0]
-
-	ldmfd	sp!, {r0-r12,lr}@ reload saved registers
-	stmfd	sp!, {r0-r3}	@ save r0-r3
-	mov	r1,  sp
-	add	sp,  sp, #16		@ restore sp
-	sub	r2,  lr, #4			@ save old task's pc to r2
-
-	mrs	r3,  spsr			@ disable interrupt
-	orr	r0,  r3, #I_BIT|F_BIT
-	msr	spsr_c, r0
-
-	ldr	r0,  =.+8			@ switch to interrupted task's stack
-	movs	pc,  r0
-
-	stmfd	sp!, {r2}		@ push old task's pc
-	stmfd	sp!, {r4-r12,lr}@ push old task's lr,r12-r4
-	mov	r4,  r1				@ Special optimised code below
-	mov	r5,  r3
-	ldmfd	r4!, {r0-r3}
-	stmfd	sp!, {r0-r3}	@ push old task's r3-r0
-	stmfd	sp!, {r5}		@ push old task's psr
-	mrs	r4,  spsr
-	stmfd	sp!, {r4}		@ push old task's spsr
-
-	ldr	r4,  =rt_interrupt_from_thread
-	ldr	r5,  [r4]
-	str	sp,  [r5]			@ store sp in preempted tasks's TCB
-
-	ldr	r6,  =rt_interrupt_to_thread
-	ldr	r6,  [r6]
-	ldr	sp,  [r6]			@ get new task's stack pointer
-
-	ldmfd	sp!, {r4}		@ pop new task's spsr
-	msr	SPSR_cxsf, r4
-	ldmfd	sp!, {r4}		@ pop new task's psr
-	msr	CPSR_cxsf, r4
-
-	ldmfd	sp!, {r0-r12,lr,pc}	@ pop new task's r0-r12,lr & pc
+    mov r1,  #0             @ clear flag
+    str r1,  [r0]
+
+    ldmfd   sp!, {r0-r12,lr}@ reload saved registers
+    stmfd   sp!, {r0-r3}    @ save r0-r3
+    mov r1,  sp
+    add sp,  sp, #16        @ restore sp
+    sub r2,  lr, #4         @ save old task's pc to r2
+
+    mrs r3,  spsr           @ disable interrupt
+    orr r0,  r3, #I_BIT|F_BIT
+    msr spsr_c, r0
+
+    ldr r0,  =.+8           @ switch to interrupted task's stack
+    movs    pc,  r0
+
+    stmfd   sp!, {r2}       @ push old task's pc
+    stmfd   sp!, {r4-r12,lr}@ push old task's lr,r12-r4
+    mov r4,  r1             @ Special optimised code below
+    mov r5,  r3
+    ldmfd   r4!, {r0-r3}
+    stmfd   sp!, {r0-r3}    @ push old task's r3-r0
+    stmfd   sp!, {r5}       @ push old task's psr
+    mrs r4,  spsr
+    stmfd   sp!, {r4}       @ push old task's spsr
+
+    ldr r4,  =rt_interrupt_from_thread
+    ldr r5,  [r4]
+    str sp,  [r5]           @ store sp in preempted tasks's TCB
+
+    ldr r6,  =rt_interrupt_to_thread
+    ldr r6,  [r6]
+    ldr sp,  [r6]           @ get new task's stack pointer
+
+    ldmfd   sp!, {r4}       @ pop new task's spsr
+    msr SPSR_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task's psr
+    msr CPSR_cxsf, r4
+
+    ldmfd   sp!, {r0-r12,lr,pc} @ pop new task's r0-r12,lr & pc

+ 37 - 37
libcpu/arm/am335x/cp15_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -26,7 +26,7 @@ rt_cpu_get_sctlr:
 
 .globl rt_cpu_dcache_enable
 rt_cpu_dcache_enable:
-    mrc     p15, #0, r0, c1, c0, #0 
+    mrc     p15, #0, r0, c1, c0, #0
     orr     r0,  r0, #0x00000004
     mcr     p15, #0, r0, c1, c0, #0
     bx      lr
@@ -35,7 +35,7 @@ rt_cpu_dcache_enable:
 rt_cpu_icache_enable:
     mrc     p15, #0, r0, c1, c0, #0
     orr     r0,  r0, #0x00001000
-    mcr     p15, #0, r0, c1, c0, #0 
+    mcr     p15, #0, r0, c1, c0, #0
     bx      lr
 
 _FLD_MAX_WAY:
@@ -45,48 +45,48 @@ _FLD_MAX_IDX:
 
 .globl rt_cpu_dcache_clean_flush
 rt_cpu_dcache_clean_flush:
-    push    {r4-r11} 
-    dmb                   
+    push    {r4-r11}
+    dmb
     mrc     p15, #1, r0, c0, c0, #1  @ read clid register
     ands    r3, r0, #0x7000000       @ get level of coherency
-    mov     r3, r3, lsr #23  
-    beq     finished  
-    mov     r10, #0 
+    mov     r3, r3, lsr #23
+    beq     finished
+    mov     r10, #0
 loop1:
-    add     r2, r10, r10, lsr #1 
+    add     r2, r10, r10, lsr #1
     mov     r1, r0, lsr r2
-    and     r1, r1, #7 
-    cmp     r1, #2  
-    blt     skip 
-    mcr     p15, #2, r10, c0, c0, #0 
-    isb 
-    mrc     p15, #1, r1, c0, c0, #0 
-    and     r2, r1, #7 
-    add     r2, r2, #4   
-    ldr     r4, _FLD_MAX_WAY 
+    and     r1, r1, #7
+    cmp     r1, #2
+    blt     skip
+    mcr     p15, #2, r10, c0, c0, #0
+    isb
+    mrc     p15, #1, r1, c0, c0, #0
+    and     r2, r1, #7
+    add     r2, r2, #4
+    ldr     r4, _FLD_MAX_WAY
     ands    r4, r4, r1, lsr #3
     clz     r5, r4
     ldr     r7, _FLD_MAX_IDX
-    ands    r7, r7, r1, lsr #13 
+    ands    r7, r7, r1, lsr #13
 loop2:
-    mov     r9, r4   
+    mov     r9, r4
 loop3:
-    orr     r11, r10, r9, lsl r5  
-    orr     r11, r11, r7, lsl r2 
-    mcr     p15, #0, r11, c7, c14, #2 
-    subs    r9, r9, #1 
-    bge     loop3 
-    subs    r7, r7, #1 
-    bge     loop2 
-skip: 
-    add     r10, r10, #2 
-    cmp     r3, r10 
-    bgt     loop1 
+    orr     r11, r10, r9, lsl r5
+    orr     r11, r11, r7, lsl r2
+    mcr     p15, #0, r11, c7, c14, #2
+    subs    r9, r9, #1
+    bge     loop3
+    subs    r7, r7, #1
+    bge     loop2
+skip:
+    add     r10, r10, #2
+    cmp     r3, r10
+    bgt     loop1
 
 finished:
     dsb
-    isb                         
-    pop     {r4-r11} 
+    isb
+    pop     {r4-r11}
     bx      lr
 
 .globl rt_cpu_dcache_disable
@@ -108,11 +108,11 @@ rt_cpu_icache_disable:
 
 .globl rt_cpu_mmu_disable
 rt_cpu_mmu_disable:
-    mcr     p15, #0, r0, c8, c7, #0    @ invalidate tlb  
-    mrc     p15, #0, r0, c1, c0, #0    
-    bic     r0, r0, #1      
+    mcr     p15, #0, r0, c8, c7, #0    @ invalidate tlb
+    mrc     p15, #0, r0, c1, c0, #0
+    bic     r0, r0, #1
     mcr     p15, #0, r0, c1, c0, #0    @ clear mmu bit
-    dsb  
+    dsb
     bx      lr
 
 .globl rt_cpu_mmu_enable

+ 41 - 41
libcpu/arm/am335x/cp15_iar.s

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -7,9 +7,9 @@
  * Date           Author       Notes
  * 2015-04-06     zchong      change to iar compiler from convert from cp15_gcc.S
  */
- 
+
     SECTION .text:CODE:NOROOT(2)
-         
+
     ARM
 
     EXPORT  rt_cpu_vector_set_base
@@ -17,7 +17,7 @@ rt_cpu_vector_set_base:
         MCR     p15, #0, r0, c12, c0, #0
         DSB
         BX      lr
-        
+
     EXPORT  rt_cpu_vector_get_base
 rt_cpu_vector_get_base:
         MRC     p15, #0, r0, c12, c0, #0
@@ -30,7 +30,7 @@ rt_cpu_get_sctlr:
 
     EXPORT  rt_cpu_dcache_enable
 rt_cpu_dcache_enable:
-        MRC     p15, #0, r0, c1, c0, #0 
+        MRC     p15, #0, r0, c1, c0, #0
         ORR     r0,  r0, #0x00000004
         MCR     p15, #0, r0, c1, c0, #0
         BX      lr
@@ -39,7 +39,7 @@ rt_cpu_dcache_enable:
 rt_cpu_icache_enable:
         MRC     p15, #0, r0, c1, c0, #0
         ORR     r0,  r0, #0x00001000
-        MCR     p15, #0, r0, c1, c0, #0 
+        MCR     p15, #0, r0, c1, c0, #0
         BX      lr
 
 ;_FLD_MAX_WAY DEFINE 0x3ff
@@ -48,50 +48,50 @@ rt_cpu_icache_enable:
 
     EXPORT  rt_cpu_dcache_clean_flush
 rt_cpu_dcache_clean_flush:
-        PUSH    {r4-r11} 
-        DMB                   
+        PUSH    {r4-r11}
+        DMB
         MRC     p15, #1, r0, c0, c0, #1  ; read clid register
         ANDS    r3, r0, #0x7000000       ; get level of coherency
-        MOV     r3, r3, lsr #23  
-        BEQ     finished  
-        MOV     r10, #0 
+        MOV     r3, r3, lsr #23
+        BEQ     finished
+        MOV     r10, #0
 loop1:
-        ADD     r2, r10, r10, lsr #1 
+        ADD     r2, r10, r10, lsr #1
         MOV     r1, r0, lsr r2
-        AND     r1, r1, #7 
-        CMP     r1, #2  
-        BLT     skip 
-        MCR     p15, #2, r10, c0, c0, #0 
-        ISB 
-        MRC     p15, #1, r1, c0, c0, #0 
-        AND     r2, r1, #7 
-        ADD     r2, r2, #4   
-        ;LDR     r4, _FLD_MAX_WAY 
+        AND     r1, r1, #7
+        CMP     r1, #2
+        BLT     skip
+        MCR     p15, #2, r10, c0, c0, #0
+        ISB
+        MRC     p15, #1, r1, c0, c0, #0
+        AND     r2, r1, #7
+        ADD     r2, r2, #4
+        ;LDR     r4, _FLD_MAX_WAY
         LDR     r4, =0x3FF
         ANDS    r4, r4, r1, lsr #3
         CLZ     r5, r4
         ;LDR     r7, _FLD_MAX_IDX
         LDR     r7, =0x7FF
-        ANDS    r7, r7, r1, lsr #13 
+        ANDS    r7, r7, r1, lsr #13
 loop2:
-        MOV     r9, r4   
+        MOV     r9, r4
 loop3:
-        ORR     r11, r10, r9, lsl r5  
-        ORR     r11, r11, r7, lsl r2 
-        MCR     p15, #0, r11, c7, c14, #2 
-        SUBS    r9, r9, #1 
-        BGE     loop3 
-        SUBS    r7, r7, #1 
-        BGE     loop2 
-skip: 
-        ADD     r10, r10, #2 
-        CMP     r3, r10 
-        BGT     loop1 
+        ORR     r11, r10, r9, lsl r5
+        ORR     r11, r11, r7, lsl r2
+        MCR     p15, #0, r11, c7, c14, #2
+        SUBS    r9, r9, #1
+        BGE     loop3
+        SUBS    r7, r7, #1
+        BGE     loop2
+skip:
+        ADD     r10, r10, #2
+        CMP     r3, r10
+        BGT     loop1
 
 finished:
         DSB
-        ISB                         
-        POP     {r4-r11} 
+        ISB
+        POP     {r4-r11}
         BX      lr
 
 
@@ -115,11 +115,11 @@ rt_cpu_icache_disable:
 
     EXPORT  rt_cpu_mmu_disable
 rt_cpu_mmu_disable:
-        MCR     p15, #0, r0, c8, c7, #0    ; invalidate tlb  
-        MRC     p15, #0, r0, c1, c0, #0    
-        BIC     r0, r0, #1      
+        MCR     p15, #0, r0, c8, c7, #0    ; invalidate tlb
+        MRC     p15, #0, r0, c1, c0, #0
+        BIC     r0, r0, #1
         MCR     p15, #0, r0, c1, c0, #0    ; clear mmu bit
-        DSB  
+        DSB
         BX      lr
 
     EXPORT  rt_cpu_mmu_enable
@@ -135,5 +135,5 @@ rt_cpu_tlb_set:
         MCR     p15, #0, r0, c2, c0, #0
         DMB
         BX      lr
-        
+
     END

+ 3 - 3
libcpu/arm/am335x/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -65,7 +65,7 @@ bss_loop:
     /* call C++ constructors of global objects                          */
     ldr     r0, =__ctors_start__
     ldr     r1, =__ctors_end__
-    
+
 ctor_loop:
     cmp     r0, r1
     beq     ctor_end
@@ -150,7 +150,7 @@ vector_undef:
 vector_swi:
     bl      rt_hw_trap_swi
 
-    .align	5
+    .align  5
 .globl vector_pabt
 vector_pabt:
     bl      rt_hw_trap_pabt

+ 11 - 11
libcpu/arm/am335x/start_iar.s

@@ -1,15 +1,15 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
  * Change Logs:
  * Date           Author       Notes
- * 2015-04-06     zchong      the first version 
+ * 2015-04-06     zchong      the first version
  */
 
         MODULE  ?cstartup
-        
+
         ; --------------------
 ; Mode, correspords to bits 0-5 in CPSR
 
@@ -25,7 +25,7 @@ ABT_MODE DEFINE 0x17            ; Abort mode
 UND_MODE DEFINE 0x1B            ; Undefined Instruction mode
 SYS_MODE DEFINE 0x1F            ; System mode
 
-        
+
         ;; Forward declaration of sections.
         SECTION IRQ_STACK:DATA:NOROOT(3)
         SECTION FIQ_STACK:DATA:NOROOT(3)
@@ -34,8 +34,8 @@ SYS_MODE DEFINE 0x1F            ; System mode
         SECTION UND_STACK:DATA:NOROOT(3)
         SECTION CSTACK:DATA:NOROOT(3)
         SECTION .text:CODE
-        
-        
+
+
         SECTION .intvec:CODE:NOROOT(5)
 
         PUBLIC  __vector
@@ -95,7 +95,7 @@ FIQ_Addr:       DCD   FIQ_Handler
         EXTERN  rt_current_thread
         EXTERN  vmm_thread
         EXTERN  vmm_virq_check
-        
+
         EXTERN  __cmain
         REQUIRE __vector
         EXTWEAK __iar_init_core
@@ -135,7 +135,7 @@ __iar_program_start:
         MSR     cpsr_c, r0              ; Change the mode
         LDR     sp, =SFE(FIQ_STACK)     ; End of FIQ_STACK
         BIC     sp,sp,#0x7              ; Make sure SP is 8 aligned
-        
+
         BIC     r0,r0,#MODE_MSK         ; Clear the mode bits
         ORR     r0,r0,#ABT_MODE         ; Set Abort mode bits
         MSR     cpsr_c,r0               ; Change the mode
@@ -165,7 +165,7 @@ __iar_program_start:
         ;; Continue to __cmain for C-level initialization.
         B       __cmain
 
-      
+
 Undefined_Handler:
         SUB     sp, sp, #72
         STMIA   sp, {r0 - r12}          ;/* Calling r0-r12                  */
@@ -217,7 +217,7 @@ Abort_Handler:
         LDR      lr, [sp, #60]          ;/* Get PC   */
         ADD      sp, sp, #72
         MOVS     pc, lr                 ;/* return & move spsr_svc into cpsr */
-         
+
 FIQ_Handler:
         STMFD   sp!,{r0-r7,lr}
         BL      rt_hw_trap_fiq
@@ -274,5 +274,5 @@ rt_hw_context_switch_interrupt_do:
         MSR     spsr_cxsf, r4
 
         LDMFD   sp!, {r0-r12,lr,pc}^ ; pop new task's r0-r12,lr & pc, copy spsr to cpsr
-    
+
      END

+ 2 - 2
libcpu/arm/am335x/vector_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -48,4 +48,4 @@ _vector_irq:
 _vector_fiq:
     .word vector_fiq
 
-.balignl 	16,0xdeadbeef
+.balignl    16,0xdeadbeef

+ 1 - 1
libcpu/arm/arm926/context_iar.S

@@ -79,4 +79,4 @@ _reswitch:
     STR     R1, [R2]
     MOV     PC, LR
     END
-    
+

+ 23 - 23
libcpu/arm/arm926/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -71,7 +71,7 @@ _vector_fiq:
 
  /*
  ***************************************
- *  Stack and Heap Definitions 
+ *  Stack and Heap Definitions
  ***************************************
  */
     .section .data
@@ -107,7 +107,7 @@ svc_stack_start:
 
 /*
  ***************************************
- * Startup Code 
+ * Startup Code
  ***************************************
  */
     .section .text
@@ -126,10 +126,10 @@ reset:
     ldr     sp, =svc_stack_start
     ldr     r0, =rt_low_level_init
     blx     r0
-    
+
     /* init stack */
     bl stack_setup
-    
+
     /* clear bss */
     mov     r0, #0
     ldr     r1, =__bss_start
@@ -139,7 +139,7 @@ bss_clear_loop:
     cmp     r1, r2
     strlo   r0, [r1], #4
     blo     bss_clear_loop
-       
+
     /* call c++ constructors of global objects */
     /*
     ldr     r0, =__ctors_start__
@@ -178,7 +178,7 @@ cpu_init_crit:
     mcr p15, 0, r0, c1, c0, 0
 
     bx lr
- 
+
 stack_setup:
     /* Setup Stack for each mode */
     mrs     r0, cpsr
@@ -209,10 +209,10 @@ stack_setup:
     ldr     sp, =svc_stack_start
 
     bx      lr
- 
+
 /*
  ***************************************
- * exception handlers 
+ * exception handlers
  ***************************************
  */
     /* Interrupt */
@@ -238,42 +238,42 @@ vector_irq:
     subs    pc,  lr, #4
 
 rt_hw_context_switch_interrupt_do:
-    mov     r1,  #0         
+    mov     r1,  #0
     str     r1,  [r0]
 
-    mov     r1, sp          
+    mov     r1, sp
     add     sp, sp, #4*4
     ldmfd   sp!, {r4-r12,lr}
-    mrs     r0,  spsr       
-    sub     r2,  lr, #4     
+    mrs     r0,  spsr
+    sub     r2,  lr, #4
 
     msr     cpsr_c, #I_BIT|F_BIT|MODE_SVC
 
-    stmfd   sp!, {r2}       
+    stmfd   sp!, {r2}
     stmfd   sp!, {r4-r12,lr}
-    ldmfd   r1,  {r1-r4}    
-    stmfd   sp!, {r1-r4}    
-    stmfd   sp!, {r0}       
+    ldmfd   r1,  {r1-r4}
+    stmfd   sp!, {r1-r4}
+    stmfd   sp!, {r0}
 
     ldr     r4,  =rt_interrupt_from_thread
     ldr     r5,  [r4]
-    str     sp,  [r5]       
+    str     sp,  [r5]
 
     ldr     r6,  =rt_interrupt_to_thread
     ldr     r6,  [r6]
-    ldr     sp,  [r6]       
+    ldr     sp,  [r6]
 
-    ldmfd   sp!, {r4}       
+    ldmfd   sp!, {r4}
     msr     spsr_cxsf, r4
 
-    ldmfd   sp!, {r0-r12,lr,pc}^ 
+    ldmfd   sp!, {r0-r12,lr,pc}^
 
     /* Exception */
 .macro push_svc_reg
     sub     sp, sp, #17 * 4
-    stmia   sp, {r0 - r12} 
+    stmia   sp, {r0 - r12}
     mov     r0, sp
-    mrs     r6, spsr       
+    mrs     r6, spsr
     str     lr, [r0, #15*4]
     str     r6, [r0, #16*4]
     str     sp, [r0, #13*4]

+ 79 - 79
libcpu/arm/armv6/arm_entry_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -14,113 +14,113 @@
 
 //#define DEBUG
 
-.macro	PRINT, str
+.macro  PRINT, str
 #ifdef DEBUG
-	stmfd	sp!, {r0-r3, ip, lr}
-	add	r0, pc, #4
-	bl	rt_kprintf
-	b	1f
-	.asciz  "UNDEF: \str\n"
-	.balign 4
-1:	ldmfd	sp!, {r0-r3, ip, lr}
+    stmfd   sp!, {r0-r3, ip, lr}
+    add r0, pc, #4
+    bl  rt_kprintf
+    b   1f
+    .asciz  "UNDEF: \str\n"
+    .balign 4
+1:  ldmfd   sp!, {r0-r3, ip, lr}
 #endif
-	.endm
+    .endm
 
 .macro  PRINT1, str, arg
 #ifdef DEBUG
-	stmfd	sp!, {r0-r3, ip, lr}
-	mov	r1, \arg
-	add	r0, pc, #4
-	bl	rt_kprintf
-	b	1f
-	.asciz  "UNDEF: \str\n"
-	.balign 4
-1:	ldmfd	sp!, {r0-r3, ip, lr}
+    stmfd   sp!, {r0-r3, ip, lr}
+    mov r1, \arg
+    add r0, pc, #4
+    bl  rt_kprintf
+    b   1f
+    .asciz  "UNDEF: \str\n"
+    .balign 4
+1:  ldmfd   sp!, {r0-r3, ip, lr}
 #endif
-	.endm
+    .endm
 
 .macro  PRINT3, str, arg1, arg2, arg3
 #ifdef DEBUG
-	stmfd	sp!, {r0-r3, ip, lr}
-	mov	r3, \arg3
-	mov	r2, \arg2
-	mov	r1, \arg1
-	add	r0, pc, #4
-	bl	rt_kprintf
-	b	1f
-	.asciz  "UNDEF: \str\n"
-	.balign 4
-1:	ldmfd	sp!, {r0-r3, ip, lr}
+    stmfd   sp!, {r0-r3, ip, lr}
+    mov r3, \arg3
+    mov r2, \arg2
+    mov r1, \arg1
+    add r0, pc, #4
+    bl  rt_kprintf
+    b   1f
+    .asciz  "UNDEF: \str\n"
+    .balign 4
+1:  ldmfd   sp!, {r0-r3, ip, lr}
 #endif
-	.endm
+    .endm
 
-.macro	get_current_thread, rd
-	ldr	\rd, .current_thread
-	ldr	\rd, [\rd]
-	.endm
+.macro  get_current_thread, rd
+    ldr \rd, .current_thread
+    ldr \rd, [\rd]
+    .endm
 
 .current_thread:
-	.word	rt_current_thread
+    .word   rt_current_thread
 
 #ifdef RT_USING_NEON
-	.align	6
+    .align  6
 
 /* is the neon instuction on arm mode? */
 .neon_opcode:
-	.word	0xfe000000			@ mask
-	.word	0xf2000000			@ opcode
+    .word   0xfe000000          @ mask
+    .word   0xf2000000          @ opcode
 
-	.word	0xff100000			@ mask
-	.word	0xf4000000			@ opcode
+    .word   0xff100000          @ mask
+    .word   0xf4000000          @ opcode
 
-	.word	0x00000000			@ end mask
-	.word	0x00000000			@ end opcode
+    .word   0x00000000          @ end mask
+    .word   0x00000000          @ end opcode
 #endif
 
 /* undefined instruction exception processing */
 .globl undef_entry
 undef_entry:
-	PRINT1 "r0=0x%08x", r0
-	PRINT1 "r2=0x%08x", r2
-	PRINT1 "r9=0x%08x", r9
-	PRINT1 "sp=0x%08x", sp
+    PRINT1 "r0=0x%08x", r0
+    PRINT1 "r2=0x%08x", r2
+    PRINT1 "r9=0x%08x", r9
+    PRINT1 "sp=0x%08x", sp
 
 #ifdef RT_USING_NEON
-	ldr	r6, .neon_opcode
+    ldr r6, .neon_opcode
 __check_neon_instruction:
-	ldr	r7, [r6], #4		@ load mask value
-	cmp	r7, #0				@ end mask?
-	beq	__check_vfp_instruction
-	and	r8, r0, r7
-	ldr	r7, [r6], #4		@ load opcode value
-	cmp	r8, r7				@ is NEON instruction?
-	bne	__check_neon_instruction
-	b	vfp_entry
+    ldr r7, [r6], #4        @ load mask value
+    cmp r7, #0              @ end mask?
+    beq __check_vfp_instruction
+    and r8, r0, r7
+    ldr r7, [r6], #4        @ load opcode value
+    cmp r8, r7              @ is NEON instruction?
+    bne __check_neon_instruction
+    b   vfp_entry
 __check_vfp_instruction:
 #endif
-	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC instruction has bit 27
-	tstne	r0, #0x04000000		@ bit 26 set on both ARM and Thumb-2 instruction
-	moveq	pc, lr				@ no vfp coprocessor instruction, return
-	get_current_thread r10
-	and	r8, r0, #0x00000f00		@ get coprocessor number
-	PRINT1 "CP=0x%08x", r8
-	add	pc, pc, r8, lsr #6
-	nop
-	mov pc,	lr				@ CP0
-	mov pc,	lr				@ CP1
-	mov pc,	lr				@ CP2
-	mov pc,	lr				@ CP3
-	mov pc,	lr				@ CP4
-	mov pc,	lr				@ CP5
-	mov pc,	lr				@ CP6
-	mov pc,	lr				@ CP7
-	mov pc,	lr				@ CP8
-	mov pc,	lr				@ CP9
-	mov pc,	lr				@ CP10 VFP
-	mov pc,	lr				@ CP11 VFP
-	mov pc,	lr				@ CP12
-	mov pc,	lr				@ CP13
-	mov pc,	lr				@ CP14 DEBUG
-	mov pc,	lr				@ CP15 SYS CONTROL
+    tst r0, #0x08000000         @ only CDP/CPRT/LDC/STC instruction has bit 27
+    tstne   r0, #0x04000000     @ bit 26 set on both ARM and Thumb-2 instruction
+    moveq   pc, lr              @ no vfp coprocessor instruction, return
+    get_current_thread r10
+    and r8, r0, #0x00000f00     @ get coprocessor number
+    PRINT1 "CP=0x%08x", r8
+    add pc, pc, r8, lsr #6
+    nop
+    mov pc, lr              @ CP0
+    mov pc, lr              @ CP1
+    mov pc, lr              @ CP2
+    mov pc, lr              @ CP3
+    mov pc, lr              @ CP4
+    mov pc, lr              @ CP5
+    mov pc, lr              @ CP6
+    mov pc, lr              @ CP7
+    mov pc, lr              @ CP8
+    mov pc, lr              @ CP9
+    mov pc, lr              @ CP10 VFP
+    mov pc, lr              @ CP11 VFP
+    mov pc, lr              @ CP12
+    mov pc, lr              @ CP13
+    mov pc, lr              @ CP14 DEBUG
+    mov pc, lr              @ CP15 SYS CONTROL
 
 

+ 320 - 320
libcpu/arm/common/divsi3.S

@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
  * Change Logs:
  * Date           Author       Notes
  */
-/*	$NetBSD: divsi3.S,v 1.5 2005/02/26 22:58:56 perry Exp $	*/
+/*  $NetBSD: divsi3.S,v 1.5 2005/02/26 22:58:56 perry Exp $ */
 
 /*
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
@@ -26,376 +26,376 @@
  * stack is aligned as there's a possibility of branching to L_overflow
  * which makes a C call
  */
-	.text
-	.align	0
-	.globl	__umodsi3
-	.type	__umodsi3 , function
+    .text
+    .align  0
+    .globl  __umodsi3
+    .type   __umodsi3 , function
 __umodsi3:
-	stmfd	sp!, {lr}
-	sub	sp, sp, #4	/* align stack */
-	bl	.L_udivide
-	add	sp, sp, #4	/* unalign stack */
-	mov	r0, r1
-	ldmfd	sp!, {pc}
+    stmfd   sp!, {lr}
+    sub sp, sp, #4  /* align stack */
+    bl  .L_udivide
+    add sp, sp, #4  /* unalign stack */
+    mov r0, r1
+    ldmfd   sp!, {pc}
 
-	.text
-	.align	0
-	.globl	__modsi3
-	.type	__modsi3 , function
+    .text
+    .align  0
+    .globl  __modsi3
+    .type   __modsi3 , function
 __modsi3:
-	stmfd	sp!, {lr}
-	sub	sp, sp, #4	/* align stack */
-	bl	.L_divide
-	add	sp, sp, #4	/* unalign stack */
-	mov	r0, r1
-	ldmfd	sp!, {pc}
+    stmfd   sp!, {lr}
+    sub sp, sp, #4  /* align stack */
+    bl  .L_divide
+    add sp, sp, #4  /* unalign stack */
+    mov r0, r1
+    ldmfd   sp!, {pc}
 
 .L_overflow:
-	/* XXX should cause a fatal error */
-	mvn	r0, #0
-	mov	pc, lr
+    /* XXX should cause a fatal error */
+    mvn r0, #0
+    mov pc, lr
 
-	.text
-	.align	0
-	.globl	__udivsi3
-	.type	__udivsi3 , function
+    .text
+    .align  0
+    .globl  __udivsi3
+    .type   __udivsi3 , function
 __udivsi3:
-.L_udivide:				/* r0 = r0 / r1; r1 = r0 % r1 */
-	eor     r0, r1, r0
-	eor     r1, r0, r1
-	eor     r0, r1, r0
-					/* r0 = r1 / r0; r1 = r1 % r0 */
-	cmp	r0, #1
-	bcc	.L_overflow
-	beq	.L_divide_l0
-	mov	ip, #0
-	movs	r1, r1
-	bpl	.L_divide_l1
-	orr	ip, ip, #0x20000000	/* ip bit 0x20000000 = -ve r1 */
-	movs	r1, r1, lsr #1
-	orrcs	ip, ip, #0x10000000	/* ip bit 0x10000000 = bit 0 of r1 */
-	b	.L_divide_l1
+.L_udivide:             /* r0 = r0 / r1; r1 = r0 % r1 */
+    eor     r0, r1, r0
+    eor     r1, r0, r1
+    eor     r0, r1, r0
+                    /* r0 = r1 / r0; r1 = r1 % r0 */
+    cmp r0, #1
+    bcc .L_overflow
+    beq .L_divide_l0
+    mov ip, #0
+    movs    r1, r1
+    bpl .L_divide_l1
+    orr ip, ip, #0x20000000 /* ip bit 0x20000000 = -ve r1 */
+    movs    r1, r1, lsr #1
+    orrcs   ip, ip, #0x10000000 /* ip bit 0x10000000 = bit 0 of r1 */
+    b   .L_divide_l1
 
-.L_divide_l0:				/* r0 == 1 */
-	mov	r0, r1
-	mov	r1, #0
-	mov	pc, lr
+.L_divide_l0:               /* r0 == 1 */
+    mov r0, r1
+    mov r1, #0
+    mov pc, lr
 
-	.text
-	.align	0
-	.globl	__divsi3
-	.type	__divsi3 , function
+    .text
+    .align  0
+    .globl  __divsi3
+    .type   __divsi3 , function
 __divsi3:
-.L_divide:				/* r0 = r0 / r1; r1 = r0 % r1 */
-	eor     r0, r1, r0
-	eor     r1, r0, r1
-	eor     r0, r1, r0
-					/* r0 = r1 / r0; r1 = r1 % r0 */
-	cmp	r0, #1
-	bcc	.L_overflow
-	beq	.L_divide_l0
-	ands	ip, r0, #0x80000000
-	rsbmi	r0, r0, #0
-	ands	r2, r1, #0x80000000
-	eor	ip, ip, r2
-	rsbmi	r1, r1, #0
-	orr	ip, r2, ip, lsr #1	/* ip bit 0x40000000 = -ve division */
-					/* ip bit 0x80000000 = -ve remainder */
+.L_divide:              /* r0 = r0 / r1; r1 = r0 % r1 */
+    eor     r0, r1, r0
+    eor     r1, r0, r1
+    eor     r0, r1, r0
+                    /* r0 = r1 / r0; r1 = r1 % r0 */
+    cmp r0, #1
+    bcc .L_overflow
+    beq .L_divide_l0
+    ands    ip, r0, #0x80000000
+    rsbmi   r0, r0, #0
+    ands    r2, r1, #0x80000000
+    eor ip, ip, r2
+    rsbmi   r1, r1, #0
+    orr ip, r2, ip, lsr #1  /* ip bit 0x40000000 = -ve division */
+                    /* ip bit 0x80000000 = -ve remainder */
 
 .L_divide_l1:
-	mov	r2, #1
-	mov	r3, #0
+    mov r2, #1
+    mov r3, #0
 
-	/*
-	 * If the highest bit of the dividend is set, we have to be
-	 * careful when shifting the divisor. Test this.
-	 */
-	movs	r1,r1
-	bpl	.L_old_code
+    /*
+     * If the highest bit of the dividend is set, we have to be
+     * careful when shifting the divisor. Test this.
+     */
+    movs    r1,r1
+    bpl .L_old_code
 
-	/*
-	 * At this point, the highest bit of r1 is known to be set.
-	 * We abuse this below in the tst instructions.
-	 */
-	tst	r1, r0 /*, lsl #0 */
-	bmi	.L_divide_b1
-	tst	r1, r0, lsl #1
-	bmi	.L_divide_b2
-	tst	r1, r0, lsl #2
-	bmi	.L_divide_b3
-	tst	r1, r0, lsl #3
-	bmi	.L_divide_b4
-	tst	r1, r0, lsl #4
-	bmi	.L_divide_b5
-	tst	r1, r0, lsl #5
-	bmi	.L_divide_b6
-	tst	r1, r0, lsl #6
-	bmi	.L_divide_b7
-	tst	r1, r0, lsl #7
-	bmi	.L_divide_b8
-	tst	r1, r0, lsl #8
-	bmi	.L_divide_b9
-	tst	r1, r0, lsl #9
-	bmi	.L_divide_b10
-	tst	r1, r0, lsl #10
-	bmi	.L_divide_b11
-	tst	r1, r0, lsl #11
-	bmi	.L_divide_b12
-	tst	r1, r0, lsl #12
-	bmi	.L_divide_b13
-	tst	r1, r0, lsl #13
-	bmi	.L_divide_b14
-	tst	r1, r0, lsl #14
-	bmi	.L_divide_b15
-	tst	r1, r0, lsl #15
-	bmi	.L_divide_b16
-	tst	r1, r0, lsl #16
-	bmi	.L_divide_b17
-	tst	r1, r0, lsl #17
-	bmi	.L_divide_b18
-	tst	r1, r0, lsl #18
-	bmi	.L_divide_b19
-	tst	r1, r0, lsl #19
-	bmi	.L_divide_b20
-	tst	r1, r0, lsl #20
-	bmi	.L_divide_b21
-	tst	r1, r0, lsl #21
-	bmi	.L_divide_b22
-	tst	r1, r0, lsl #22
-	bmi	.L_divide_b23
-	tst	r1, r0, lsl #23
-	bmi	.L_divide_b24
-	tst	r1, r0, lsl #24
-	bmi	.L_divide_b25
-	tst	r1, r0, lsl #25
-	bmi	.L_divide_b26
-	tst	r1, r0, lsl #26
-	bmi	.L_divide_b27
-	tst	r1, r0, lsl #27
-	bmi	.L_divide_b28
-	tst	r1, r0, lsl #28
-	bmi	.L_divide_b29
-	tst	r1, r0, lsl #29
-	bmi	.L_divide_b30
-	tst	r1, r0, lsl #30
-	bmi	.L_divide_b31
+    /*
+     * At this point, the highest bit of r1 is known to be set.
+     * We abuse this below in the tst instructions.
+     */
+    tst r1, r0 /*, lsl #0 */
+    bmi .L_divide_b1
+    tst r1, r0, lsl #1
+    bmi .L_divide_b2
+    tst r1, r0, lsl #2
+    bmi .L_divide_b3
+    tst r1, r0, lsl #3
+    bmi .L_divide_b4
+    tst r1, r0, lsl #4
+    bmi .L_divide_b5
+    tst r1, r0, lsl #5
+    bmi .L_divide_b6
+    tst r1, r0, lsl #6
+    bmi .L_divide_b7
+    tst r1, r0, lsl #7
+    bmi .L_divide_b8
+    tst r1, r0, lsl #8
+    bmi .L_divide_b9
+    tst r1, r0, lsl #9
+    bmi .L_divide_b10
+    tst r1, r0, lsl #10
+    bmi .L_divide_b11
+    tst r1, r0, lsl #11
+    bmi .L_divide_b12
+    tst r1, r0, lsl #12
+    bmi .L_divide_b13
+    tst r1, r0, lsl #13
+    bmi .L_divide_b14
+    tst r1, r0, lsl #14
+    bmi .L_divide_b15
+    tst r1, r0, lsl #15
+    bmi .L_divide_b16
+    tst r1, r0, lsl #16
+    bmi .L_divide_b17
+    tst r1, r0, lsl #17
+    bmi .L_divide_b18
+    tst r1, r0, lsl #18
+    bmi .L_divide_b19
+    tst r1, r0, lsl #19
+    bmi .L_divide_b20
+    tst r1, r0, lsl #20
+    bmi .L_divide_b21
+    tst r1, r0, lsl #21
+    bmi .L_divide_b22
+    tst r1, r0, lsl #22
+    bmi .L_divide_b23
+    tst r1, r0, lsl #23
+    bmi .L_divide_b24
+    tst r1, r0, lsl #24
+    bmi .L_divide_b25
+    tst r1, r0, lsl #25
+    bmi .L_divide_b26
+    tst r1, r0, lsl #26
+    bmi .L_divide_b27
+    tst r1, r0, lsl #27
+    bmi .L_divide_b28
+    tst r1, r0, lsl #28
+    bmi .L_divide_b29
+    tst r1, r0, lsl #29
+    bmi .L_divide_b30
+    tst r1, r0, lsl #30
+    bmi .L_divide_b31
 /*
  * instead of:
- *	tst	r1, r0, lsl #31
- *	bmi	.L_divide_b32
+ *  tst r1, r0, lsl #31
+ *  bmi .L_divide_b32
  */
-	b	.L_divide_b32
+    b   .L_divide_b32
 
 .L_old_code:
-	cmp	r1, r0
-	bcc	.L_divide_b0
-	cmp	r1, r0, lsl #1
-	bcc	.L_divide_b1
-	cmp	r1, r0, lsl #2
-	bcc	.L_divide_b2
-	cmp	r1, r0, lsl #3
-	bcc	.L_divide_b3
-	cmp	r1, r0, lsl #4
-	bcc	.L_divide_b4
-	cmp	r1, r0, lsl #5
-	bcc	.L_divide_b5
-	cmp	r1, r0, lsl #6
-	bcc	.L_divide_b6
-	cmp	r1, r0, lsl #7
-	bcc	.L_divide_b7
-	cmp	r1, r0, lsl #8
-	bcc	.L_divide_b8
-	cmp	r1, r0, lsl #9
-	bcc	.L_divide_b9
-	cmp	r1, r0, lsl #10
-	bcc	.L_divide_b10
-	cmp	r1, r0, lsl #11
-	bcc	.L_divide_b11
-	cmp	r1, r0, lsl #12
-	bcc	.L_divide_b12
-	cmp	r1, r0, lsl #13
-	bcc	.L_divide_b13
-	cmp	r1, r0, lsl #14
-	bcc	.L_divide_b14
-	cmp	r1, r0, lsl #15
-	bcc	.L_divide_b15
-	cmp	r1, r0, lsl #16
-	bcc	.L_divide_b16
-	cmp	r1, r0, lsl #17
-	bcc	.L_divide_b17
-	cmp	r1, r0, lsl #18
-	bcc	.L_divide_b18
-	cmp	r1, r0, lsl #19
-	bcc	.L_divide_b19
-	cmp	r1, r0, lsl #20
-	bcc	.L_divide_b20
-	cmp	r1, r0, lsl #21
-	bcc	.L_divide_b21
-	cmp	r1, r0, lsl #22
-	bcc	.L_divide_b22
-	cmp	r1, r0, lsl #23
-	bcc	.L_divide_b23
-	cmp	r1, r0, lsl #24
-	bcc	.L_divide_b24
-	cmp	r1, r0, lsl #25
-	bcc	.L_divide_b25
-	cmp	r1, r0, lsl #26
-	bcc	.L_divide_b26
-	cmp	r1, r0, lsl #27
-	bcc	.L_divide_b27
-	cmp	r1, r0, lsl #28
-	bcc	.L_divide_b28
-	cmp	r1, r0, lsl #29
-	bcc	.L_divide_b29
-	cmp	r1, r0, lsl #30
-	bcc	.L_divide_b30
+    cmp r1, r0
+    bcc .L_divide_b0
+    cmp r1, r0, lsl #1
+    bcc .L_divide_b1
+    cmp r1, r0, lsl #2
+    bcc .L_divide_b2
+    cmp r1, r0, lsl #3
+    bcc .L_divide_b3
+    cmp r1, r0, lsl #4
+    bcc .L_divide_b4
+    cmp r1, r0, lsl #5
+    bcc .L_divide_b5
+    cmp r1, r0, lsl #6
+    bcc .L_divide_b6
+    cmp r1, r0, lsl #7
+    bcc .L_divide_b7
+    cmp r1, r0, lsl #8
+    bcc .L_divide_b8
+    cmp r1, r0, lsl #9
+    bcc .L_divide_b9
+    cmp r1, r0, lsl #10
+    bcc .L_divide_b10
+    cmp r1, r0, lsl #11
+    bcc .L_divide_b11
+    cmp r1, r0, lsl #12
+    bcc .L_divide_b12
+    cmp r1, r0, lsl #13
+    bcc .L_divide_b13
+    cmp r1, r0, lsl #14
+    bcc .L_divide_b14
+    cmp r1, r0, lsl #15
+    bcc .L_divide_b15
+    cmp r1, r0, lsl #16
+    bcc .L_divide_b16
+    cmp r1, r0, lsl #17
+    bcc .L_divide_b17
+    cmp r1, r0, lsl #18
+    bcc .L_divide_b18
+    cmp r1, r0, lsl #19
+    bcc .L_divide_b19
+    cmp r1, r0, lsl #20
+    bcc .L_divide_b20
+    cmp r1, r0, lsl #21
+    bcc .L_divide_b21
+    cmp r1, r0, lsl #22
+    bcc .L_divide_b22
+    cmp r1, r0, lsl #23
+    bcc .L_divide_b23
+    cmp r1, r0, lsl #24
+    bcc .L_divide_b24
+    cmp r1, r0, lsl #25
+    bcc .L_divide_b25
+    cmp r1, r0, lsl #26
+    bcc .L_divide_b26
+    cmp r1, r0, lsl #27
+    bcc .L_divide_b27
+    cmp r1, r0, lsl #28
+    bcc .L_divide_b28
+    cmp r1, r0, lsl #29
+    bcc .L_divide_b29
+    cmp r1, r0, lsl #30
+    bcc .L_divide_b30
 .L_divide_b32:
-	cmp	r1, r0, lsl #31
-	subhs	r1, r1,r0, lsl #31
-	addhs	r3, r3,r2, lsl #31
+    cmp r1, r0, lsl #31
+    subhs   r1, r1,r0, lsl #31
+    addhs   r3, r3,r2, lsl #31
 .L_divide_b31:
-	cmp	r1, r0, lsl #30
-	subhs	r1, r1,r0, lsl #30
-	addhs	r3, r3,r2, lsl #30
+    cmp r1, r0, lsl #30
+    subhs   r1, r1,r0, lsl #30
+    addhs   r3, r3,r2, lsl #30
 .L_divide_b30:
-	cmp	r1, r0, lsl #29
-	subhs	r1, r1,r0, lsl #29
-	addhs	r3, r3,r2, lsl #29
+    cmp r1, r0, lsl #29
+    subhs   r1, r1,r0, lsl #29
+    addhs   r3, r3,r2, lsl #29
 .L_divide_b29:
-	cmp	r1, r0, lsl #28
-	subhs	r1, r1,r0, lsl #28
-	addhs	r3, r3,r2, lsl #28
+    cmp r1, r0, lsl #28
+    subhs   r1, r1,r0, lsl #28
+    addhs   r3, r3,r2, lsl #28
 .L_divide_b28:
-	cmp	r1, r0, lsl #27
-	subhs	r1, r1,r0, lsl #27
-	addhs	r3, r3,r2, lsl #27
+    cmp r1, r0, lsl #27
+    subhs   r1, r1,r0, lsl #27
+    addhs   r3, r3,r2, lsl #27
 .L_divide_b27:
-	cmp	r1, r0, lsl #26
-	subhs	r1, r1,r0, lsl #26
-	addhs	r3, r3,r2, lsl #26
+    cmp r1, r0, lsl #26
+    subhs   r1, r1,r0, lsl #26
+    addhs   r3, r3,r2, lsl #26
 .L_divide_b26:
-	cmp	r1, r0, lsl #25
-	subhs	r1, r1,r0, lsl #25
-	addhs	r3, r3,r2, lsl #25
+    cmp r1, r0, lsl #25
+    subhs   r1, r1,r0, lsl #25
+    addhs   r3, r3,r2, lsl #25
 .L_divide_b25:
-	cmp	r1, r0, lsl #24
-	subhs	r1, r1,r0, lsl #24
-	addhs	r3, r3,r2, lsl #24
+    cmp r1, r0, lsl #24
+    subhs   r1, r1,r0, lsl #24
+    addhs   r3, r3,r2, lsl #24
 .L_divide_b24:
-	cmp	r1, r0, lsl #23
-	subhs	r1, r1,r0, lsl #23
-	addhs	r3, r3,r2, lsl #23
+    cmp r1, r0, lsl #23
+    subhs   r1, r1,r0, lsl #23
+    addhs   r3, r3,r2, lsl #23
 .L_divide_b23:
-	cmp	r1, r0, lsl #22
-	subhs	r1, r1,r0, lsl #22
-	addhs	r3, r3,r2, lsl #22
+    cmp r1, r0, lsl #22
+    subhs   r1, r1,r0, lsl #22
+    addhs   r3, r3,r2, lsl #22
 .L_divide_b22:
-	cmp	r1, r0, lsl #21
-	subhs	r1, r1,r0, lsl #21
-	addhs	r3, r3,r2, lsl #21
+    cmp r1, r0, lsl #21
+    subhs   r1, r1,r0, lsl #21
+    addhs   r3, r3,r2, lsl #21
 .L_divide_b21:
-	cmp	r1, r0, lsl #20
-	subhs	r1, r1,r0, lsl #20
-	addhs	r3, r3,r2, lsl #20
+    cmp r1, r0, lsl #20
+    subhs   r1, r1,r0, lsl #20
+    addhs   r3, r3,r2, lsl #20
 .L_divide_b20:
-	cmp	r1, r0, lsl #19
-	subhs	r1, r1,r0, lsl #19
-	addhs	r3, r3,r2, lsl #19
+    cmp r1, r0, lsl #19
+    subhs   r1, r1,r0, lsl #19
+    addhs   r3, r3,r2, lsl #19
 .L_divide_b19:
-	cmp	r1, r0, lsl #18
-	subhs	r1, r1,r0, lsl #18
-	addhs	r3, r3,r2, lsl #18
+    cmp r1, r0, lsl #18
+    subhs   r1, r1,r0, lsl #18
+    addhs   r3, r3,r2, lsl #18
 .L_divide_b18:
-	cmp	r1, r0, lsl #17
-	subhs	r1, r1,r0, lsl #17
-	addhs	r3, r3,r2, lsl #17
+    cmp r1, r0, lsl #17
+    subhs   r1, r1,r0, lsl #17
+    addhs   r3, r3,r2, lsl #17
 .L_divide_b17:
-	cmp	r1, r0, lsl #16
-	subhs	r1, r1,r0, lsl #16
-	addhs	r3, r3,r2, lsl #16
+    cmp r1, r0, lsl #16
+    subhs   r1, r1,r0, lsl #16
+    addhs   r3, r3,r2, lsl #16
 .L_divide_b16:
-	cmp	r1, r0, lsl #15
-	subhs	r1, r1,r0, lsl #15
-	addhs	r3, r3,r2, lsl #15
+    cmp r1, r0, lsl #15
+    subhs   r1, r1,r0, lsl #15
+    addhs   r3, r3,r2, lsl #15
 .L_divide_b15:
-	cmp	r1, r0, lsl #14
-	subhs	r1, r1,r0, lsl #14
-	addhs	r3, r3,r2, lsl #14
+    cmp r1, r0, lsl #14
+    subhs   r1, r1,r0, lsl #14
+    addhs   r3, r3,r2, lsl #14
 .L_divide_b14:
-	cmp	r1, r0, lsl #13
-	subhs	r1, r1,r0, lsl #13
-	addhs	r3, r3,r2, lsl #13
+    cmp r1, r0, lsl #13
+    subhs   r1, r1,r0, lsl #13
+    addhs   r3, r3,r2, lsl #13
 .L_divide_b13:
-	cmp	r1, r0, lsl #12
-	subhs	r1, r1,r0, lsl #12
-	addhs	r3, r3,r2, lsl #12
+    cmp r1, r0, lsl #12
+    subhs   r1, r1,r0, lsl #12
+    addhs   r3, r3,r2, lsl #12
 .L_divide_b12:
-	cmp	r1, r0, lsl #11
-	subhs	r1, r1,r0, lsl #11
-	addhs	r3, r3,r2, lsl #11
+    cmp r1, r0, lsl #11
+    subhs   r1, r1,r0, lsl #11
+    addhs   r3, r3,r2, lsl #11
 .L_divide_b11:
-	cmp	r1, r0, lsl #10
-	subhs	r1, r1,r0, lsl #10
-	addhs	r3, r3,r2, lsl #10
+    cmp r1, r0, lsl #10
+    subhs   r1, r1,r0, lsl #10
+    addhs   r3, r3,r2, lsl #10
 .L_divide_b10:
-	cmp	r1, r0, lsl #9
-	subhs	r1, r1,r0, lsl #9
-	addhs	r3, r3,r2, lsl #9
+    cmp r1, r0, lsl #9
+    subhs   r1, r1,r0, lsl #9
+    addhs   r3, r3,r2, lsl #9
 .L_divide_b9:
-	cmp	r1, r0, lsl #8
-	subhs	r1, r1,r0, lsl #8
-	addhs	r3, r3,r2, lsl #8
+    cmp r1, r0, lsl #8
+    subhs   r1, r1,r0, lsl #8
+    addhs   r3, r3,r2, lsl #8
 .L_divide_b8:
-	cmp	r1, r0, lsl #7
-	subhs	r1, r1,r0, lsl #7
-	addhs	r3, r3,r2, lsl #7
+    cmp r1, r0, lsl #7
+    subhs   r1, r1,r0, lsl #7
+    addhs   r3, r3,r2, lsl #7
 .L_divide_b7:
-	cmp	r1, r0, lsl #6
-	subhs	r1, r1,r0, lsl #6
-	addhs	r3, r3,r2, lsl #6
+    cmp r1, r0, lsl #6
+    subhs   r1, r1,r0, lsl #6
+    addhs   r3, r3,r2, lsl #6
 .L_divide_b6:
-	cmp	r1, r0, lsl #5
-	subhs	r1, r1,r0, lsl #5
-	addhs	r3, r3,r2, lsl #5
+    cmp r1, r0, lsl #5
+    subhs   r1, r1,r0, lsl #5
+    addhs   r3, r3,r2, lsl #5
 .L_divide_b5:
-	cmp	r1, r0, lsl #4
-	subhs	r1, r1,r0, lsl #4
-	addhs	r3, r3,r2, lsl #4
+    cmp r1, r0, lsl #4
+    subhs   r1, r1,r0, lsl #4
+    addhs   r3, r3,r2, lsl #4
 .L_divide_b4:
-	cmp	r1, r0, lsl #3
-	subhs	r1, r1,r0, lsl #3
-	addhs	r3, r3,r2, lsl #3
+    cmp r1, r0, lsl #3
+    subhs   r1, r1,r0, lsl #3
+    addhs   r3, r3,r2, lsl #3
 .L_divide_b3:
-	cmp	r1, r0, lsl #2
-	subhs	r1, r1,r0, lsl #2
-	addhs	r3, r3,r2, lsl #2
+    cmp r1, r0, lsl #2
+    subhs   r1, r1,r0, lsl #2
+    addhs   r3, r3,r2, lsl #2
 .L_divide_b2:
-	cmp	r1, r0, lsl #1
-	subhs	r1, r1,r0, lsl #1
-	addhs	r3, r3,r2, lsl #1
+    cmp r1, r0, lsl #1
+    subhs   r1, r1,r0, lsl #1
+    addhs   r3, r3,r2, lsl #1
 .L_divide_b1:
-	cmp	r1, r0
-	subhs	r1, r1, r0
-	addhs	r3, r3, r2
+    cmp r1, r0
+    subhs   r1, r1, r0
+    addhs   r3, r3, r2
 .L_divide_b0:
 
-	tst	ip, #0x20000000
-	bne	.L_udivide_l1
-	mov	r0, r3
-	cmp	ip, #0
-	rsbmi	r1, r1, #0
-	movs	ip, ip, lsl #1
-	bicmi	r0, r0, #0x80000000	/* Fix incase we divided 0x80000000 */
-	rsbmi	r0, r0, #0
-	mov	pc, lr
+    tst ip, #0x20000000
+    bne .L_udivide_l1
+    mov r0, r3
+    cmp ip, #0
+    rsbmi   r1, r1, #0
+    movs    ip, ip, lsl #1
+    bicmi   r0, r0, #0x80000000 /* Fix incase we divided 0x80000000 */
+    rsbmi   r0, r0, #0
+    mov pc, lr
 
 .L_udivide_l1:
-	tst	ip, #0x10000000
-	mov	r1, r1, lsl #1
-	orrne	r1, r1, #1
-	mov	r3, r3, lsl #1
-	cmp	r1, r0
-	subhs	r1, r1, r0
-	addhs	r3, r3, r2
-	mov	r0, r3
-	mov	pc, lr
+    tst ip, #0x10000000
+    mov r1, r1, lsl #1
+    orrne   r1, r1, #1
+    mov r3, r3, lsl #1
+    cmp r1, r0
+    subhs   r1, r1, r0
+    addhs   r3, r3, r2
+    mov r0, r3
+    mov pc, lr

+ 1 - 1
libcpu/arm/cortex-a/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *

+ 10 - 10
libcpu/arm/cortex-m0/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -12,18 +12,18 @@
  * 2013-06-18   aozima       add restore MSP feature.
  * 2013-11-04   bright       fixed hardfault bug for gcc.
  */
- 
+
     .cpu    cortex-m0
     .fpu    softvfp
     .syntax unified
     .thumb
     .text
 
-	.equ 	SCB_VTOR, 0xE000ED08            /* Vector Table Offset Register */
-	.equ 	NVIC_INT_CTRL, 0xE000ED04       /* interrupt control state register */
-	.equ 	NVIC_SHPR3, 0xE000ED20          /* system priority register (3) */
-	.equ 	NVIC_PENDSV_PRI, 0xFFFF0000     /* PendSV and SysTick priority value (lowest) */
-	.equ 	NVIC_PENDSVSET, 0x10000000      /* value to trigger PendSV exception */
+    .equ    SCB_VTOR, 0xE000ED08            /* Vector Table Offset Register */
+    .equ    NVIC_INT_CTRL, 0xE000ED04       /* interrupt control state register */
+    .equ    NVIC_SHPR3, 0xE000ED20          /* system priority register (3) */
+    .equ    NVIC_PENDSV_PRI, 0xFFFF0000     /* PendSV and SysTick priority value (lowest) */
+    .equ    NVIC_PENDSVSET, 0x10000000      /* value to trigger PendSV exception */
 
 /*
  * rt_base_t rt_hw_interrupt_disable();
@@ -90,7 +90,7 @@ PendSV_Handler:
     LDR     R0, =rt_thread_switch_interrupt_flag
     LDR     R1, [R0]
     CMP     R1, #0x00
-    BEQ     pendsv_exit		/* pendsv aLReady handled */
+    BEQ     pendsv_exit     /* pendsv aLReady handled */
 
     /* clear rt_thread_switch_interrupt_flag to 0 */
     MOVS    R1, #0
@@ -101,7 +101,7 @@ PendSV_Handler:
     CMP     R1, #0x00
     BEQ     switch_to_thread    /* skip register save at the first time */
 
-	MRS     R1, PSP                 /* get from thread stack pointer */
+    MRS     R1, PSP                 /* get from thread stack pointer */
 
     SUBS    R1, R1, #0x20           /* space for {R4 - R7} and {R8 - R11} */
     LDR     R0, [R0]
@@ -119,7 +119,7 @@ switch_to_thread:
     LDR     R1, [R1]
     LDR     R1, [R1]                /* load thread stack pointer */
 
-	LDMIA   R1!, {R4 - R7}          /* pop thread {R4 - R7} register from thread stack */
+    LDMIA   R1!, {R4 - R7}          /* pop thread {R4 - R7} register from thread stack */
     PUSH    {R4 - R7}               /* push {R4 - R7} to MSP for copy {R8 - R11} */
 
     LDMIA   R1!, {R4 - R7}          /* pop thread {R8 - R11} high register from thread stack to {R4 - R7} */

+ 3 - 3
libcpu/arm/cortex-m0/context_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -46,8 +46,8 @@ rt_hw_interrupt_disable    PROC
 ; */
 rt_hw_interrupt_enable    PROC
     EXPORT  rt_hw_interrupt_enable
-    MSR		PRIMASK, r0
-    BX		LR
+    MSR     PRIMASK, r0
+    BX      LR
     ENDP
 
 ;/*

+ 10 - 10
libcpu/arm/cortex-m23/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2019, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -13,18 +13,18 @@
  * 2013-11-04   bright       fixed hardfault bug for gcc.
  * 2019-03-31   xuzhuoyi     port to Cortex-M23.
  */
- 
+
     .cpu    cortex-m23
     .fpu    softvfp
     .syntax unified
     .thumb
     .text
 
-	.equ 	SCB_VTOR, 0xE000ED08            /* Vector Table Offset Register */
-	.equ 	NVIC_INT_CTRL, 0xE000ED04       /* interrupt control state register */
-	.equ 	NVIC_SHPR3, 0xE000ED20          /* system priority register (3) */
-	.equ 	NVIC_PENDSV_PRI, 0xFFFF0000     /* PendSV and SysTick priority value (lowest) */
-	.equ 	NVIC_PENDSVSET, 0x10000000      /* value to trigger PendSV exception */
+    .equ    SCB_VTOR, 0xE000ED08            /* Vector Table Offset Register */
+    .equ    NVIC_INT_CTRL, 0xE000ED04       /* interrupt control state register */
+    .equ    NVIC_SHPR3, 0xE000ED20          /* system priority register (3) */
+    .equ    NVIC_PENDSV_PRI, 0xFFFF0000     /* PendSV and SysTick priority value (lowest) */
+    .equ    NVIC_PENDSVSET, 0x10000000      /* value to trigger PendSV exception */
 
 /*
  * rt_base_t rt_hw_interrupt_disable();
@@ -91,7 +91,7 @@ PendSV_Handler:
     LDR     R0, =rt_thread_switch_interrupt_flag
     LDR     R1, [R0]
     CMP     R1, #0x00
-    BEQ     pendsv_exit		/* pendsv aLReady handled */
+    BEQ     pendsv_exit     /* pendsv aLReady handled */
 
     /* clear rt_thread_switch_interrupt_flag to 0 */
     MOVS    R1, #0
@@ -102,7 +102,7 @@ PendSV_Handler:
     CMP     R1, #0x00
     BEQ     switch_to_thread    /* skip register save at the first time */
 
-	MRS     R1, PSP                 /* get from thread stack pointer */
+    MRS     R1, PSP                 /* get from thread stack pointer */
 
     SUBS    R1, R1, #0x20           /* space for {R4 - R7} and {R8 - R11} */
     LDR     R0, [R0]
@@ -120,7 +120,7 @@ switch_to_thread:
     LDR     R1, [R1]
     LDR     R1, [R1]                /* load thread stack pointer */
 
-	LDMIA   R1!, {R4 - R7}          /* pop thread {R4 - R7} register from thread stack */
+    LDMIA   R1!, {R4 - R7}          /* pop thread {R4 - R7} register from thread stack */
     PUSH    {R4 - R7}               /* push {R4 - R7} to MSP for copy {R8 - R11} */
 
     LDMIA   R1!, {R4 - R7}          /* pop thread {R8 - R11} high register from thread stack to {R4 - R7} */

+ 3 - 3
libcpu/arm/cortex-m23/context_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2019, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -47,8 +47,8 @@ rt_hw_interrupt_disable    PROC
 ; */
 rt_hw_interrupt_enable    PROC
     EXPORT  rt_hw_interrupt_enable
-    MSR		PRIMASK, r0
-    BX		LR
+    MSR     PRIMASK, r0
+    BX      LR
     ENDP
 
 ;/*

+ 3 - 3
libcpu/arm/cortex-m3/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -12,7 +12,7 @@
  * 2013-06-18   aozima    add restore MSP feature.
  * 2013-07-09   aozima    enhancement hard fault exception handler.
  */
- 
+
     .cpu    cortex-m3
     .fpu    softvfp
     .syntax unified
@@ -22,7 +22,7 @@
     .equ    SCB_VTOR, 0xE000ED08            /* Vector Table Offset Register */
     .equ    ICSR, 0xE000ED04                /* interrupt control state register */
     .equ    PENDSVSET_BIT, 0x10000000       /* value to trigger PendSV exception */
-    
+
     .equ    SHPR3, 0xE000ED20               /* system priority register (3) */
     .equ    PENDSV_PRI_LOWEST, 0xFFFF0000   /* PendSV and SysTick priority value (lowest) */
 

+ 1 - 1
libcpu/arm/cortex-m3/context_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *

+ 1 - 1
libcpu/arm/cortex-m33/context_iar.S

@@ -272,7 +272,7 @@ HardFault_Handler:
 get_sp_done
 
     STMFD   r0!, {r4 - r11}                         ; push r4 - r11 register
-    
+
     LDR     r2,  =rt_trustzone_current_context      ; r2 = &rt_secure_current_context
     LDR     r2, [r2]                                ; r2 = *r2
     MOV     r3, lr                                  ; r3 = lr

+ 2 - 2
libcpu/arm/cortex-m33/syscall_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -19,7 +19,7 @@
 .global tzcall
 .type tzcall, %function
 tzcall:
-	SVC     1                       /* call SVC 1 */
+    SVC     1                       /* call SVC 1 */
     BX      LR
 
 tzcall_entry:

+ 2 - 2
libcpu/arm/cortex-m33/syscall_iar.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -23,7 +23,7 @@
 .global tzcall
 .type tzcall, %function
 tzcall:
-	SVC     1                       ;/* call SVC 1 */
+    SVC     1                       ;/* call SVC 1 */
     BX      LR
 
 tzcall_entry:

+ 4 - 4
libcpu/arm/cortex-m33/syscall_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -20,7 +20,7 @@
 ; */
 tzcall PROC
     EXPORT tzcall
-	SVC     1                       ;call SVC 1
+    SVC     1                       ;call SVC 1
     BX      LR
 
     ENDP
@@ -28,7 +28,7 @@ tzcall PROC
 tzcall_entry     PROC
     PUSH    {R1, R4, LR}
     MOV     R4, R1                  ; copy thread SP to R4
-    LDMFD   R4!, {r0 - r3}          ; pop user stack, get input arg0, arg1, arg2 
+    LDMFD   R4!, {r0 - r3}          ; pop user stack, get input arg0, arg1, arg2
     STMFD   R4!, {r0 - r3}          ; push stack, user stack recovery
     BL      rt_secure_svc_handle    ; call fun
     POP     {R1, R4, LR}
@@ -71,4 +71,4 @@ get_sp_done
 
     ALIGN
 
-    END
+    END

+ 3 - 3
libcpu/arm/cortex-m4/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -104,13 +104,13 @@ PendSV_Handler:
     CBZ r1, switch_to_thread    /* skip register save at the first time */
 
     MRS r1, psp                 /* get from thread stack pointer */
-    
+
 #if defined (__VFP_FP__) && !defined(__SOFTFP__)
     TST     lr, #0x10           /* if(!EXC_RETURN[4]) */
     IT      EQ
     VSTMDBEQ r1!, {d8 - d15}    /* push FPU register s16~s31 */
 #endif
-    
+
     STMFD   r1!, {r4 - r11}     /* push r4 - r11 register */
 
 #if defined (__VFP_FP__) && !defined(__SOFTFP__)

+ 2 - 2
libcpu/arm/cortex-m7/context_gcc.S

@@ -104,13 +104,13 @@ PendSV_Handler:
     CBZ r1, switch_to_thread    /* skip register save at the first time */
 
     MRS r1, psp                 /* get from thread stack pointer */
-    
+
 #if defined (__VFP_FP__) && !defined(__SOFTFP__)
     TST     lr, #0x10           /* if(!EXC_RETURN[4]) */
     IT      EQ
     VSTMDBEQ r1!, {d8 - d15}    /* push FPU register s16~s31 */
 #endif
-    
+
     STMFD   r1!, {r4 - r11}     /* push r4 - r11 register */
 
 #if defined (__VFP_FP__) && !defined(__SOFTFP__)

+ 14 - 14
libcpu/arm/cortex-r4/context_ccs.asm

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -59,17 +59,17 @@ rt_hw_context_switch
     STMDB   sp!, {r4}           ; push cpsr
 
     .if (__TI_VFP_SUPPORT__)
-		VMRS    r4,  fpexc
+        VMRS    r4,  fpexc
         TST     r4,  #0x40000000
         BEQ     __no_vfp_frame1
-		VSTMDB  sp!, {d0-d15}
+        VSTMDB  sp!, {d0-d15}
         VMRS    r5, fpscr
         ; TODO: add support for Common VFPv3.
         ;       Save registers like FPINST, FPINST2
         STMDB   sp!, {r5}
 __no_vfp_frame1
         STMDB   sp!, {r4}
-	.endif
+    .endif
 
     STR     sp, [r0]            ; store sp in preempted tasks TCB
     LDR     sp, [r1]            ; get new task stack pointer
@@ -81,7 +81,7 @@ __no_vfp_frame1
         BEQ     __no_vfp_frame2
         LDMIA   sp!, {r1}       ; get fpscr
         VMSR    fpscr, r1
-		VLDMIA  sp!, {d0-d15}
+        VLDMIA  sp!, {d0-d15}
 __no_vfp_frame2
     .endif
 
@@ -107,7 +107,7 @@ rt_hw_context_switch_to
         BEQ     __no_vfp_frame_to
         LDMIA   sp!, {r1}       ; get fpscr
         VMSR    fpscr, r1
-		VLDMIA  sp!, {d0-d15}
+        VLDMIA  sp!, {d0-d15}
 __no_vfp_frame_to
     .endif
 
@@ -143,17 +143,17 @@ IRQ_Handler
     STMDB   sp!, {r0-r12,lr}
 
     .if (__TI_VFP_SUPPORT__)
-		VMRS    r0,  fpexc
+        VMRS    r0,  fpexc
         TST     r0,  #0x40000000
         BEQ     __no_vfp_frame_str_irq
-		VSTMDB  sp!, {d0-d15}
+        VSTMDB  sp!, {d0-d15}
         VMRS    r1, fpscr
         ; TODO: add support for Common VFPv3.
         ;       Save registers like FPINST, FPINST2
         STMDB   sp!, {r1}
 __no_vfp_frame_str_irq
         STMDB   sp!, {r0}
-	.endif
+    .endif
 
     BL  rt_interrupt_enter
     BL  rt_hw_trap_irq
@@ -173,7 +173,7 @@ __no_vfp_frame_str_irq
         BEQ     __no_vfp_frame_ldr_irq
         LDMIA   sp!, {r1}       ; get fpscr
         VMSR    fpscr, r1
-		VLDMIA  sp!, {d0-d15}
+        VLDMIA  sp!, {d0-d15}
 __no_vfp_frame_ldr_irq
     .endif
 
@@ -195,7 +195,7 @@ rt_hw_context_switch_interrupt_do
         BEQ     __no_vfp_frame_do1
         LDMIA   sp!, {r1}       ; get fpscr
         VMSR    fpscr, r1
-		VLDMIA  sp!, {d0-d15}
+        VLDMIA  sp!, {d0-d15}
 __no_vfp_frame_do1
     .endif
 
@@ -219,7 +219,7 @@ __no_vfp_frame_do1
     STMDB   sp!, {r3}         ; push old task's cpsr
 
     .if (__TI_VFP_SUPPORT__)
-		VMRS    r0,  fpexc
+        VMRS    r0,  fpexc
         TST     r0,  #0x40000000
         BEQ     __no_vfp_frame_do2
         VSTMDB  sp!, {d0-d15}
@@ -229,7 +229,7 @@ __no_vfp_frame_do1
         STMDB   sp!, {r1}
 __no_vfp_frame_do2
         STMDB   sp!, {r0}
-	.endif
+    .endif
 
     LDR     r4,  pfromthread
     LDR     r5,  [r4]
@@ -246,7 +246,7 @@ __no_vfp_frame_do2
         BEQ     __no_vfp_frame_do3
         LDMIA   sp!, {r1}       ; get fpscr
         VMSR    fpscr, r1
-		VLDMIA  sp!, {d0-d15}
+        VLDMIA  sp!, {d0-d15}
 __no_vfp_frame_do3
     .endif
 

+ 11 - 11
libcpu/arm/cortex-r4/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -57,10 +57,10 @@ rt_hw_context_switch:
     STMDB   sp!, {r4}           @ push cpsr
 
 #if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
-		VMRS    r4,  fpexc
+        VMRS    r4,  fpexc
         TST     r4,  #0x40000000
         BEQ     __no_vfp_frame1
-		VSTMDB  sp!, {d0-d15}
+        VSTMDB  sp!, {d0-d15}
         VMRS    r5, fpscr
         @ TODO: add support for Common VFPv3.
         @       Save registers like FPINST, FPINST2
@@ -79,7 +79,7 @@ __no_vfp_frame1:
         BEQ     __no_vfp_frame2
         LDMIA   sp!, {r1}       @ get fpscr
         VMSR    fpscr, r1
-		VLDMIA  sp!, {d0-d15}
+        VLDMIA  sp!, {d0-d15}
 __no_vfp_frame2:
     #endif
 
@@ -103,7 +103,7 @@ rt_hw_context_switch_to:
         BEQ     __no_vfp_frame_to
         LDMIA   sp!, {r1}       @ get fpscr
         VMSR    fpscr, r1
-		VLDMIA  sp!, {d0-d15}
+        VLDMIA  sp!, {d0-d15}
 __no_vfp_frame_to:
 #endif
 
@@ -137,10 +137,10 @@ IRQ_Handler:
     STMDB   sp!, {r0-r12,lr}
 
 #if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
-		VMRS    r0,  fpexc
+        VMRS    r0,  fpexc
         TST     r0,  #0x40000000
         BEQ     __no_vfp_frame_str_irq
-		VSTMDB  sp!, {d0-d15}
+        VSTMDB  sp!, {d0-d15}
         VMRS    r1, fpscr
         @ TODO: add support for Common VFPv3.
         @       Save registers like FPINST, FPINST2
@@ -167,7 +167,7 @@ __no_vfp_frame_str_irq:
         BEQ     __no_vfp_frame_ldr_irq
         LDMIA   sp!, {r1}       @ get fpscr
         VMSR    fpscr, r1
-		VLDMIA  sp!, {d0-d15}
+        VLDMIA  sp!, {d0-d15}
 __no_vfp_frame_ldr_irq:
 #endif
 
@@ -189,7 +189,7 @@ rt_hw_context_switch_interrupt_do:
         BEQ     __no_vfp_frame_do1
         LDMIA   sp!, {r1}       @ get fpscr
         VMSR    fpscr, r1
-		VLDMIA  sp!, {d0-d15}
+        VLDMIA  sp!, {d0-d15}
 __no_vfp_frame_do1:
 #endif
 
@@ -213,7 +213,7 @@ __no_vfp_frame_do1:
     STMDB   sp!, {r3}         @ push old task's cpsr
 
 #if defined (__VFP_FP__) && !defined(__SOFTFP__) && defined(RT_VFP_LAZY_STACKING)
-		VMRS    r0,  fpexc
+        VMRS    r0,  fpexc
         TST     r0,  #0x40000000
         BEQ     __no_vfp_frame_do2
         VSTMDB  sp!, {d0-d15}
@@ -240,7 +240,7 @@ __no_vfp_frame_do2:
         BEQ     __no_vfp_frame_do3
         LDMIA   sp!, {r1}       @ get fpscr
         VMSR    fpscr, r1
-		VLDMIA  sp!, {d0-d15}
+        VLDMIA  sp!, {d0-d15}
 __no_vfp_frame_do3:
 #endif
 

+ 13 - 13
libcpu/arm/cortex-r4/start_ccs.asm

@@ -502,42 +502,42 @@ _push_svc_reg    .macro
         cps     #0x13
         str     sp, [r0, #13*4]         ;/* Save calling SP                 */
         str     lr, [r0, #14*4]         ;/* Save calling PC                 */
-	.endm
+    .endm
 
-	.ref    rt_hw_trap_svc
-    .def	vector_svc
+    .ref    rt_hw_trap_svc
+    .def    vector_svc
     .asmfunc
 vector_svc:
         _push_svc_reg
         bl      rt_hw_trap_svc
-		sub     pc, pc, #-4
+        sub     pc, pc, #-4
     .endasmfunc
 
-	.ref    rt_hw_trap_pabt
-    .def	vector_pabort
+    .ref    rt_hw_trap_pabt
+    .def    vector_pabort
     .asmfunc
 vector_pabort:
         _push_svc_reg
         bl      rt_hw_trap_pabt
-		sub     pc, pc, #-4
+        sub     pc, pc, #-4
     .endasmfunc
 
-	.ref    rt_hw_trap_dabt
-    .def	vector_dabort
+    .ref    rt_hw_trap_dabt
+    .def    vector_dabort
     .asmfunc
 vector_dabort:
         _push_svc_reg
         bl      rt_hw_trap_dabt
-		sub     pc, pc, #-4
+        sub     pc, pc, #-4
     .endasmfunc
 
-	.ref    rt_hw_trap_resv
-    .def	vector_resv
+    .ref    rt_hw_trap_resv
+    .def    vector_resv
     .asmfunc
 vector_resv:
         _push_svc_reg
         bl      rt_hw_trap_resv
-		sub     pc, pc, #-4
+        sub     pc, pc, #-4
     .endasmfunc
 
 ;-------------------------------------------------------------------------------

+ 9 - 9
libcpu/arm/cortex-r4/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -462,26 +462,26 @@ turnon_VFP:
         str     lr, [r0, #14*4]         @/* Save calling PC                 */
     .endm
 
-    .globl	vector_svc
+    .globl  vector_svc
 vector_svc:
         push_svc_reg
         bl      rt_hw_trap_svc
-		b       .
+        b       .
 
-    .globl	vector_pabort
+    .globl  vector_pabort
 vector_pabort:
         push_svc_reg
         bl      rt_hw_trap_pabt
-		b       .
+        b       .
 
-    .globl	vector_dabort
+    .globl  vector_dabort
 vector_dabort:
         push_svc_reg
         bl      rt_hw_trap_dabt
-		b       .
+        b       .
 
-    .globl	vector_resv
+    .globl  vector_resv
 vector_resv:
         push_svc_reg
         bl      rt_hw_trap_resv
-		b       .
+        b       .

+ 61 - 61
libcpu/arm/dm36x/context_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -8,96 +8,96 @@
 ; * 2011-08-14     weety    copy from mini2440
 ; */
 
-NOINT	EQU		0xc0	; disable interrupt in psr
+NOINT   EQU     0xc0    ; disable interrupt in psr
 
-	AREA |.text|, CODE, READONLY, ALIGN=2
-	ARM
-	REQUIRE8
-	PRESERVE8
+    AREA |.text|, CODE, READONLY, ALIGN=2
+    ARM
+    REQUIRE8
+    PRESERVE8
 
 ;/*
 ; * rt_base_t rt_hw_interrupt_disable();
 ; */
-rt_hw_interrupt_disable	PROC
-	EXPORT rt_hw_interrupt_disable
-	MRS r0, cpsr
-	ORR r1, r0, #NOINT
-	MSR cpsr_c, r1
-	BX	lr
-	ENDP
+rt_hw_interrupt_disable PROC
+    EXPORT rt_hw_interrupt_disable
+    MRS r0, cpsr
+    ORR r1, r0, #NOINT
+    MSR cpsr_c, r1
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_interrupt_enable(rt_base_t level);
 ; */
-rt_hw_interrupt_enable	PROC
-	EXPORT rt_hw_interrupt_enable
-	MSR cpsr_c, r0
-	BX	lr
-	ENDP
+rt_hw_interrupt_enable  PROC
+    EXPORT rt_hw_interrupt_enable
+    MSR cpsr_c, r0
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 ; * r0 --> from
 ; * r1 --> to
 ; */
-rt_hw_context_switch	PROC
-	EXPORT rt_hw_context_switch
-	STMFD	sp!, {lr}			; push pc (lr should be pushed in place of PC)
-	STMFD	sp!, {r0-r12, lr}	; push lr & register file
+rt_hw_context_switch    PROC
+    EXPORT rt_hw_context_switch
+    STMFD   sp!, {lr}           ; push pc (lr should be pushed in place of PC)
+    STMFD   sp!, {r0-r12, lr}   ; push lr & register file
 
-	MRS		r4, cpsr
-	STMFD	sp!, {r4}			; push cpsr
-	MRS		r4, spsr
-	STMFD	sp!, {r4}			; push spsr
+    MRS     r4, cpsr
+    STMFD   sp!, {r4}           ; push cpsr
+    MRS     r4, spsr
+    STMFD   sp!, {r4}           ; push spsr
 
-	STR	sp, [r0]				; store sp in preempted tasks TCB
-	LDR	sp, [r1]				; get new task stack pointer
+    STR sp, [r0]                ; store sp in preempted tasks TCB
+    LDR sp, [r1]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task spsr
-	MSR	spsr_cxsf, r4
-	LDMFD	sp!, {r4}			; pop new task cpsr
-	MSR	spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR spsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}^	; pop new task r0-r12, lr & pc
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}^  ; pop new task r0-r12, lr & pc
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_to(rt_uint32 to);
 ; * r0 --> to
 ; */
-rt_hw_context_switch_to	PROC
-	EXPORT rt_hw_context_switch_to
-	LDR	sp, [r0]				; get new task stack pointer
+rt_hw_context_switch_to PROC
+    EXPORT rt_hw_context_switch_to
+    LDR sp, [r0]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task spsr
-	MSR	spsr_cxsf, r4
-	LDMFD	sp!, {r4}			; pop new task cpsr
-	MSR	cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR cpsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}   ; pop new task r0-r12, lr & pc
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 ; */
-	IMPORT rt_thread_switch_interrupt_flag
-	IMPORT rt_interrupt_from_thread
-	IMPORT rt_interrupt_to_thread
+    IMPORT rt_thread_switch_interrupt_flag
+    IMPORT rt_interrupt_from_thread
+    IMPORT rt_interrupt_to_thread
 
-rt_hw_context_switch_interrupt	PROC
-	EXPORT rt_hw_context_switch_interrupt
-	LDR r2, =rt_thread_switch_interrupt_flag
-	LDR r3, [r2]
-	CMP r3, #1
-	BEQ _reswitch
-	MOV r3, #1							; set rt_thread_switch_interrupt_flag to 1
-	STR r3, [r2]
-	LDR r2, =rt_interrupt_from_thread	; set rt_interrupt_from_thread
-	STR r0, [r2]
+rt_hw_context_switch_interrupt  PROC
+    EXPORT rt_hw_context_switch_interrupt
+    LDR r2, =rt_thread_switch_interrupt_flag
+    LDR r3, [r2]
+    CMP r3, #1
+    BEQ _reswitch
+    MOV r3, #1                          ; set rt_thread_switch_interrupt_flag to 1
+    STR r3, [r2]
+    LDR r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
+    STR r0, [r2]
 _reswitch
-	LDR r2, =rt_interrupt_to_thread		; set rt_interrupt_to_thread
-	STR r1, [r2]
-	BX	lr
-	ENDP
+    LDR r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
+    STR r1, [r2]
+    BX  lr
+    ENDP
 
-	END
+    END

+ 64 - 64
libcpu/arm/lpc214x/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -19,22 +19,22 @@
  关闭中断,关闭前返回CPSR寄存器值
  */
 rt_hw_interrupt_disable:
-	//EXPORT rt_hw_interrupt_disable
-	MRS r0, cpsr
-	ORR r1, r0, #NOINT
-	MSR cpsr_c, r1
-	BX	lr
-	//ENDP
+    //EXPORT rt_hw_interrupt_disable
+    MRS r0, cpsr
+    ORR r1, r0, #NOINT
+    MSR cpsr_c, r1
+    BX  lr
+    //ENDP
 
 /*
  * void rt_hw_interrupt_enable(rt_base_t level);
   恢复中断状态
  */
 rt_hw_interrupt_enable:
-	//EXPORT rt_hw_interrupt_enable
-	MSR cpsr_c, r0
-	BX	lr
-	//ENDP
+    //EXPORT rt_hw_interrupt_enable
+    MSR cpsr_c, r0
+    BX  lr
+    //ENDP
 
 /*
  * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
@@ -43,68 +43,68 @@ rt_hw_interrupt_enable:
  进行线程的上下文切换
  */
 rt_hw_context_switch:
-	//EXPORT rt_hw_context_switch
-	STMFD	sp!, {lr}			/* push pc (lr should be pushed in place of PC) */
-							    /* 把LR寄存器压入栈(这个函数返回后的下一个执行处) */
-	STMFD	sp!, {r0-r12, lr}	/* push lr & register file */
-								/*  把R0 – R12以及LR压入栈 */
+    //EXPORT rt_hw_context_switch
+    STMFD   sp!, {lr}           /* push pc (lr should be pushed in place of PC) */
+                                /* 把LR寄存器压入栈(这个函数返回后的下一个执行处) */
+    STMFD   sp!, {r0-r12, lr}   /* push lr & register file */
+                                /*  把R0 – R12以及LR压入栈 */
 
-	MRS		r4, cpsr			/*  读取CPSR寄存器到R4寄存器 */
-	STMFD	sp!, {r4}			/* push cpsr */
-							    /* 把R4寄存器压栈(即上一指令取出的CPSR寄存器) */
-	MRS		r4, spsr		    /* 读取SPSR寄存器到R4寄存器 */
-	STMFD	sp!, {r4}			/* push spsr */
-								/* 把R4寄存器压栈(即SPSR寄存器) */
+    MRS     r4, cpsr            /*  读取CPSR寄存器到R4寄存器 */
+    STMFD   sp!, {r4}           /* push cpsr */
+                                /* 把R4寄存器压栈(即上一指令取出的CPSR寄存器) */
+    MRS     r4, spsr            /* 读取SPSR寄存器到R4寄存器 */
+    STMFD   sp!, {r4}           /* push spsr */
+                                /* 把R4寄存器压栈(即SPSR寄存器) */
 
-	STR	sp, [r0]				/* store sp in preempted tasks TCB */
-								/*  把栈指针更新到TCB的sp,是由R0传入此函数 */
-								/*  到这里换出线程的上下文都保存在栈中 */
-	LDR	sp, [r1]				/* get new task stack pointer */
-								/*  载入切换到线程的TCB的sp */
-								/*  从切换到线程的栈中恢复上下文,次序和保存的时候刚好相反 */
+    STR sp, [r0]                /* store sp in preempted tasks TCB */
+                                /*  把栈指针更新到TCB的sp,是由R0传入此函数 */
+                                /*  到这里换出线程的上下文都保存在栈中 */
+    LDR sp, [r1]                /* get new task stack pointer */
+                                /*  载入切换到线程的TCB的sp */
+                                /*  从切换到线程的栈中恢复上下文,次序和保存的时候刚好相反 */
 
-	LDMFD	sp!, {r4}			/* pop new task spsr */
-								/* 出栈到R4寄存器(保存了SPSR寄存器) */
-	MSR	spsr_cxsf, r4			/* 恢复SPSR寄存器 */
-	LDMFD	sp!, {r4}			/* pop new task cpsr */
-								/* 出栈到R4寄存器(保存了CPSR寄存器) */
-	MSR	cpsr_cxsf, r4			/*  恢复CPSR寄存器 */
+    LDMFD   sp!, {r4}           /* pop new task spsr */
+                                /* 出栈到R4寄存器(保存了SPSR寄存器) */
+    MSR spsr_cxsf, r4           /* 恢复SPSR寄存器 */
+    LDMFD   sp!, {r4}           /* pop new task cpsr */
+                                /* 出栈到R4寄存器(保存了CPSR寄存器) */
+    MSR cpsr_cxsf, r4           /*  恢复CPSR寄存器 */
 
-	LDMFD	sp!, {r0-r12, lr, pc}	/* pop new task r0-r12, lr & pc */
-									/* 对R0 – R12及LR、PC进行恢复 */
-	//ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}   /* pop new task r0-r12, lr & pc */
+                                    /* 对R0 – R12及LR、PC进行恢复 */
+    //ENDP
 
 rt_hw_context_switch_to:
-	//EXPORT rt_hw_context_switch_to
-	LDR	sp, [r0]				/* get new task stack pointer */
-								/* 获得切换到线程的SP指针 */
+    //EXPORT rt_hw_context_switch_to
+    LDR sp, [r0]                /* get new task stack pointer */
+                                /* 获得切换到线程的SP指针 */
 
-	LDMFD	sp!, {r4}			/* pop new task spsr */
-								/* 出栈R4寄存器(保存了SPSR寄存器值) */
-	MSR	spsr_cxsf, r4			/* 恢复SPSR寄存器 */
-	LDMFD	sp!, {r4}			/* pop new task cpsr */
-								/* 出栈R4寄存器(保存了CPSR寄存器值) */
-	MSR	cpsr_cxsf, r4			/* 恢复CPSR寄存器 */
+    LDMFD   sp!, {r4}           /* pop new task spsr */
+                                /* 出栈R4寄存器(保存了SPSR寄存器值) */
+    MSR spsr_cxsf, r4           /* 恢复SPSR寄存器 */
+    LDMFD   sp!, {r4}           /* pop new task cpsr */
+                                /* 出栈R4寄存器(保存了CPSR寄存器值) */
+    MSR cpsr_cxsf, r4           /* 恢复CPSR寄存器 */
 
-	LDMFD	sp!, {r0-r12, lr, pc}	/* pop new task r0-r12, lr & pc */
-									/* 恢复R0 – R12,LR及PC寄存器 */
-	//ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}   /* pop new task r0-r12, lr & pc */
+                                    /* 恢复R0 – R12,LR及PC寄存器 */
+    //ENDP
 
 rt_hw_context_switch_interrupt:
-	//EXPORT rt_hw_context_switch_interrupt
-	LDR r2, =rt_thread_switch_interrupt_flag
-	LDR r3, [r2]						/* 载入中断中切换标致地址 */
-	CMP r3, #1							/* 等于 1 ?*/
-	BEQ _reswitch						/* 如果等于1,跳转到_reswitch*/
-	MOV r3, #1							/* set rt_thread_switch_interrupt_flag to 1*/
-										/* 设置中断中切换标志位1 */
-	STR r3, [r2]						/* */
-	LDR r2, =rt_interrupt_from_thread	/* set rt_interrupt_from_thread*/
-	STR r0, [r2]						/* 保存切换出线程栈指针*/
+    //EXPORT rt_hw_context_switch_interrupt
+    LDR r2, =rt_thread_switch_interrupt_flag
+    LDR r3, [r2]                        /* 载入中断中切换标致地址 */
+    CMP r3, #1                          /* 等于 1 ?*/
+    BEQ _reswitch                       /* 如果等于1,跳转到_reswitch*/
+    MOV r3, #1                          /* set rt_thread_switch_interrupt_flag to 1*/
+                                        /* 设置中断中切换标志位1 */
+    STR r3, [r2]                        /* */
+    LDR r2, =rt_interrupt_from_thread   /* set rt_interrupt_from_thread*/
+    STR r0, [r2]                        /* 保存切换出线程栈指针*/
 _reswitch:
-	LDR r2, =rt_interrupt_to_thread		/* set rt_interrupt_to_thread*/
-	STR r1, [r2]						/* 保存切换到线程栈指针*/
-	BX	lr
-	//ENDP
+    LDR r2, =rt_interrupt_to_thread     /* set rt_interrupt_to_thread*/
+    STR r1, [r2]                        /* 保存切换到线程栈指针*/
+    BX  lr
+    //ENDP
 
-	//END
+    //END

+ 94 - 94
libcpu/arm/lpc214x/context_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -20,141 +20,141 @@ Mode_SYS        EQU     0x1F
 I_Bit           EQU     0x80            ; when I bit is set, IRQ is disabled
 F_Bit           EQU     0x40            ; when F bit is set, FIQ is disabled
 
-NOINT	        EQU     0xc0	; disable interrupt in psr
+NOINT           EQU     0xc0    ; disable interrupt in psr
 
-	AREA |.text|, CODE, READONLY, ALIGN=2
-	ARM
-	REQUIRE8
-	PRESERVE8
+    AREA |.text|, CODE, READONLY, ALIGN=2
+    ARM
+    REQUIRE8
+    PRESERVE8
 
 ;/*
 ; * rt_base_t rt_hw_interrupt_disable();
 ; */
-rt_hw_interrupt_disable	PROC
-	EXPORT rt_hw_interrupt_disable
-	MRS r0, cpsr
-	ORR r1, r0, #NOINT
-	MSR cpsr_c, r1
-	BX	lr
-	ENDP
+rt_hw_interrupt_disable PROC
+    EXPORT rt_hw_interrupt_disable
+    MRS r0, cpsr
+    ORR r1, r0, #NOINT
+    MSR cpsr_c, r1
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_interrupt_enable(rt_base_t level);
 ; */
-rt_hw_interrupt_enable	PROC
-	EXPORT rt_hw_interrupt_enable
-	MSR cpsr_c, r0
-	BX	lr
-	ENDP
+rt_hw_interrupt_enable  PROC
+    EXPORT rt_hw_interrupt_enable
+    MSR cpsr_c, r0
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 ; * r0 --> from
 ; * r1 --> to
 ; */
-rt_hw_context_switch	PROC
-	EXPORT rt_hw_context_switch
-	STMFD	sp!, {lr}			; push pc (lr should be pushed in place of PC)
-	STMFD	sp!, {r0-r12, lr}	; push lr & register file
+rt_hw_context_switch    PROC
+    EXPORT rt_hw_context_switch
+    STMFD   sp!, {lr}           ; push pc (lr should be pushed in place of PC)
+    STMFD   sp!, {r0-r12, lr}   ; push lr & register file
 
-	MRS		r4, cpsr
+    MRS     r4, cpsr
         TST     lr, #0x01
         BEQ     _ARM_MODE
         ORR     r4, r4, #0x20           ; it's thumb code
 _ARM_MODE
-	STMFD	sp!, {r4}			; push cpsr
+    STMFD   sp!, {r4}           ; push cpsr
 
-	STR	sp, [r0]				; store sp in preempted tasks TCB
-	LDR	sp, [r1]				; get new task stack pointer
+    STR sp, [r0]                ; store sp in preempted tasks TCB
+    LDR sp, [r1]                ; get new task stack pointer
 
     LDMFD   sp!, {r4}               ; pop new task cpsr to spsr
-	MSR	spsr_cxsf, r4
+    MSR spsr_cxsf, r4
     BIC     r4, r4, #0x20           ; must be ARM mode
-	MSR	cpsr_cxsf, r4
+    MSR cpsr_cxsf, r4
 
     LDMFD   sp!, {r0-r12, lr, pc}^  ; pop new task r0-r12, lr & pc, copy spsr to cpsr
-	ENDP
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_to(rt_uint32 to);
 ; * r0 --> to
 ; */
-rt_hw_context_switch_to	PROC
-	EXPORT rt_hw_context_switch_to
-	LDR	sp, [r0]				; get new task stack pointer
+rt_hw_context_switch_to PROC
+    EXPORT rt_hw_context_switch_to
+    LDR sp, [r0]                ; get new task stack pointer
 
     LDMFD   sp!, {r4}               ; pop new task cpsr to spsr
-	MSR	spsr_cxsf, r4
+    MSR spsr_cxsf, r4
     BIC     r4, r4, #0x20           ; must be ARM mode
-	MSR	cpsr_cxsf, r4
+    MSR cpsr_cxsf, r4
 
     LDMFD   sp!, {r0-r12, lr, pc}^  ; pop new task r0-r12, lr & pc, copy spsr to cpsr
-	ENDP
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 ; */
-	IMPORT rt_thread_switch_interrupt_flag
-	IMPORT rt_interrupt_from_thread
-	IMPORT rt_interrupt_to_thread
-
-rt_hw_context_switch_interrupt	PROC
-	EXPORT rt_hw_context_switch_interrupt
-	LDR r2, =rt_thread_switch_interrupt_flag
-	LDR r3, [r2]
-	CMP r3, #1
-	BEQ _reswitch
-	MOV r3, #1							; set rt_thread_switch_interrupt_flag to 1
-	STR r3, [r2]
-	LDR r2, =rt_interrupt_from_thread	; set rt_interrupt_from_thread
-	STR r0, [r2]
+    IMPORT rt_thread_switch_interrupt_flag
+    IMPORT rt_interrupt_from_thread
+    IMPORT rt_interrupt_to_thread
+
+rt_hw_context_switch_interrupt  PROC
+    EXPORT rt_hw_context_switch_interrupt
+    LDR r2, =rt_thread_switch_interrupt_flag
+    LDR r3, [r2]
+    CMP r3, #1
+    BEQ _reswitch
+    MOV r3, #1                          ; set rt_thread_switch_interrupt_flag to 1
+    STR r3, [r2]
+    LDR r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
+    STR r0, [r2]
 _reswitch
-	LDR r2, =rt_interrupt_to_thread		; set rt_interrupt_to_thread
-	STR r1, [r2]
-	BX	lr
-	ENDP
+    LDR r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
+    STR r1, [r2]
+    BX  lr
+    ENDP
 
 ; /*
 ; * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
 ; */
-rt_hw_context_switch_interrupt_do	PROC
-	EXPORT rt_hw_context_switch_interrupt_do
-	MOV		r1,  #0			; clear flag
-	STR		r1,  [r0]
-
-	LDMFD	sp!, {r0-r12,lr}; reload saved registers
-	STMFD	sp!, {r0-r3}	; save r0-r3
-	MOV		r1,  sp
-	ADD		sp,  sp, #16	; restore sp
-	SUB		r2,  lr, #4		; save old task's pc to r2
-
-	MRS		r3,  spsr		; get cpsr of interrupt thread
-
-	; switch to SVC mode and no interrupt
-	MSR     cpsr_c, #I_Bit:OR:F_Bit:OR:Mode_SVC
-
-	STMFD	sp!, {r2}		; push old task's pc
-	STMFD	sp!, {r4-r12,lr}; push old task's lr,r12-r4
-	MOV		r4,  r1			; Special optimised code below
-	MOV		r5,  r3
-	LDMFD	r4!, {r0-r3}
-	STMFD	sp!, {r0-r3}	; push old task's r3-r0
-	STMFD	sp!, {r5}		; push old task's cpsr
-
-	LDR		r4,  =rt_interrupt_from_thread
-	LDR		r5,  [r4]
-	STR		sp,  [r5]		; store sp in preempted tasks's TCB
-
-	LDR		r6,  =rt_interrupt_to_thread
-	LDR		r6,  [r6]
-	LDR		sp,  [r6]		; get new task's stack pointer
-	
-	LDMFD   sp!, {r4}       ; pop new task's cpsr to spsr
-	MSR		spsr_cxsf, r4
-	BIC     r4, r4, #0x20   ; must be ARM mode
-	MSR		cpsr_cxsf, r4
-
-	LDMFD   sp!, {r0-r12,lr,pc}^ ; pop new task's r0-r12,lr & pc, copy spsr to cpsr
-	ENDP
-
-	END
+rt_hw_context_switch_interrupt_do   PROC
+    EXPORT rt_hw_context_switch_interrupt_do
+    MOV     r1,  #0         ; clear flag
+    STR     r1,  [r0]
+
+    LDMFD   sp!, {r0-r12,lr}; reload saved registers
+    STMFD   sp!, {r0-r3}    ; save r0-r3
+    MOV     r1,  sp
+    ADD     sp,  sp, #16    ; restore sp
+    SUB     r2,  lr, #4     ; save old task's pc to r2
+
+    MRS     r3,  spsr       ; get cpsr of interrupt thread
+
+    ; switch to SVC mode and no interrupt
+    MSR     cpsr_c, #I_Bit:OR:F_Bit:OR:Mode_SVC
+
+    STMFD   sp!, {r2}       ; push old task's pc
+    STMFD   sp!, {r4-r12,lr}; push old task's lr,r12-r4
+    MOV     r4,  r1         ; Special optimised code below
+    MOV     r5,  r3
+    LDMFD   r4!, {r0-r3}
+    STMFD   sp!, {r0-r3}    ; push old task's r3-r0
+    STMFD   sp!, {r5}       ; push old task's cpsr
+
+    LDR     r4,  =rt_interrupt_from_thread
+    LDR     r5,  [r4]
+    STR     sp,  [r5]       ; store sp in preempted tasks's TCB
+
+    LDR     r6,  =rt_interrupt_to_thread
+    LDR     r6,  [r6]
+    LDR     sp,  [r6]       ; get new task's stack pointer
+
+    LDMFD   sp!, {r4}       ; pop new task's cpsr to spsr
+    MSR     spsr_cxsf, r4
+    BIC     r4, r4, #0x20   ; must be ARM mode
+    MSR     cpsr_cxsf, r4
+
+    LDMFD   sp!, {r0-r12,lr,pc}^ ; pop new task's r0-r12,lr & pc, copy spsr to cpsr
+    ENDP
+
+    END

+ 171 - 171
libcpu/arm/lpc214x/startup_gcc.S

@@ -1,25 +1,25 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
  * Change Logs:
  * Date           Author       Notes
  */
-	.extern main                               /* 引入外部C入口 */
+    .extern main                               /* 引入外部C入口 */
 
-	.extern rt_interrupt_enter
-	.extern rt_interrupt_leave
-	.extern rt_thread_switch_interrupt_flag
-	.extern rt_interrupt_from_thread
-	.extern rt_interrupt_to_thread
-	.extern rt_hw_trap_irq
+    .extern rt_interrupt_enter
+    .extern rt_interrupt_leave
+    .extern rt_thread_switch_interrupt_flag
+    .extern rt_interrupt_from_thread
+    .extern rt_interrupt_to_thread
+    .extern rt_hw_trap_irq
 
-	.global start
-	.global endless_loop
+    .global start
+    .global endless_loop
     .global rt_hw_context_switch_interrupt_do
 
-	/* Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs */
+    /* Standard definitions of Mode bits and Interrupt (I & F) flags in PSRs */
     .set  MODE_USR, 0x10            /* User Mode */
     .set  MODE_FIQ, 0x11            /* FIQ Mode */
     .set  MODE_IRQ, 0x12            /* IRQ Mode */
@@ -34,107 +34,107 @@
     .equ  F_Bit, 0x40               /* when F bit is set, FIQ is disabled */
 
     /* VPBDIV definitions*/
-    .equ  VPBDIV, 		0xE01FC100
-    .set  VPBDIV_VALUE,	0x00000000
+    .equ  VPBDIV,       0xE01FC100
+    .set  VPBDIV_VALUE, 0x00000000
 
     /* Phase Locked Loop (PLL) definitions*/
-    .equ  PLL_BASE, 	 0xE01FC080  /* PLL Base Address */
-    .equ  PLLCON_OFS, 	 0x00        /* PLL Control Offset */
-    .equ  PLLCFG_OFS, 	 0x04        /* PLL Configuration Offset */
-    .equ  PLLSTAT_OFS, 	 0x08        /* PLL Status Offset */
+    .equ  PLL_BASE,      0xE01FC080  /* PLL Base Address */
+    .equ  PLLCON_OFS,    0x00        /* PLL Control Offset */
+    .equ  PLLCFG_OFS,    0x04        /* PLL Configuration Offset */
+    .equ  PLLSTAT_OFS,   0x08        /* PLL Status Offset */
     .equ  PLLFEED_OFS,   0x0C        /* PLL Feed Offset */
-	.equ  PLLCON_PLLE,   (1<<0)      /* PLL Enable */
-	.equ  PLLCON_PLLC,   (1<<1)      /* PLL Connect */
-	.equ  PLLCFG_MSEL,   (0x1F<<0)   /* PLL Multiplier */
-	.equ  PLLCFG_PSEL,   (0x03<<5)   /* PLL Divider */
-	.equ  PLLSTAT_PLOCK, (1<<10)     /* PLL Lock Status */
-    .equ  PLLCFG_Val,	 0x00000024  /* <o1.0..4>   MSEL: PLL Multiplier Selection,<o1.5..6>   PSEL: PLL Divider Selection */
+    .equ  PLLCON_PLLE,   (1<<0)      /* PLL Enable */
+    .equ  PLLCON_PLLC,   (1<<1)      /* PLL Connect */
+    .equ  PLLCFG_MSEL,   (0x1F<<0)   /* PLL Multiplier */
+    .equ  PLLCFG_PSEL,   (0x03<<5)   /* PLL Divider */
+    .equ  PLLSTAT_PLOCK, (1<<10)     /* PLL Lock Status */
+    .equ  PLLCFG_Val,    0x00000024  /* <o1.0..4>   MSEL: PLL Multiplier Selection,<o1.5..6>   PSEL: PLL Divider Selection */
 
-    .equ  MEMMAP,		0xE01FC040     /*Memory Mapping Control*/
+    .equ  MEMMAP,       0xE01FC040     /*Memory Mapping Control*/
 
 
     /* Memory Accelerator Module (MAM) definitions*/
-    .equ  MAM_BASE, 	0xE01FC000
-    .equ  MAMCR_OFS, 	0x00
-    .equ  MAMTIM_OFS, 	0x04
+    .equ  MAM_BASE,     0xE01FC000
+    .equ  MAMCR_OFS,    0x00
+    .equ  MAMTIM_OFS,   0x04
     .equ  MAMCR_Val,    0x00000002
     .equ  MAMTIM_Val,   0x00000004
 
-    .equ  VICIntEnClr,	0xFFFFF014
-    .equ  VICIntSelect,	0xFFFFF00C
+    .equ  VICIntEnClr,  0xFFFFF014
+    .equ  VICIntSelect, 0xFFFFF00C
 /************* 目标配置结束 *************/
 
 
 /* Setup the operating mode & stack.*/
 /* --------------------------------- */
-	.global _reset
+    .global _reset
 _reset:
-	.code 32
-	.align 0
+    .code 32
+    .align 0
 
 /************************* PLL_SETUP **********************************/
-		ldr     r0, =PLL_BASE
-		mov		r1, #0xAA
-		mov		r2, #0x55
+        ldr     r0, =PLL_BASE
+        mov     r1, #0xAA
+        mov     r2, #0x55
 
 /* Configure and Enable PLL */
-		mov     r3, #PLLCFG_Val
-		str     r3, [r0, #PLLCFG_OFS]
-		mov     r3, #PLLCON_PLLE
-		str     r3, [r0, #PLLCON_OFS]
-		str     r1, [r0, #PLLFEED_OFS]
-		str     r2, [r0, #PLLFEED_OFS]
+        mov     r3, #PLLCFG_Val
+        str     r3, [r0, #PLLCFG_OFS]
+        mov     r3, #PLLCON_PLLE
+        str     r3, [r0, #PLLCON_OFS]
+        str     r1, [r0, #PLLFEED_OFS]
+        str     r2, [r0, #PLLFEED_OFS]
 
 /*  Wait until PLL Locked */
 PLL_Locked_loop:
-		ldr     r3, [r0, #PLLSTAT_OFS]
-		ands    r3, r3, #PLLSTAT_PLOCK
-		beq     PLL_Locked_loop
+        ldr     r3, [r0, #PLLSTAT_OFS]
+        ands    r3, r3, #PLLSTAT_PLOCK
+        beq     PLL_Locked_loop
 
 /*  Switch to PLL Clock */
-		mov     r3, #(PLLCON_PLLE|PLLCON_PLLC)
-		str     r3, [r0, #PLLCON_OFS]
-		str     r1, [r0, #PLLFEED_OFS]
-		str     R2, [r0, #PLLFEED_OFS]
+        mov     r3, #(PLLCON_PLLE|PLLCON_PLLC)
+        str     r3, [r0, #PLLCON_OFS]
+        str     r1, [r0, #PLLFEED_OFS]
+        str     R2, [r0, #PLLFEED_OFS]
 /************************* PLL_SETUP **********************************/
 
 /************************ Setup VPBDIV ********************************/
-		ldr		r0, =VPBDIV
-		ldr     r1, =VPBDIV_VALUE
-		str		r1, [r0]
+        ldr     r0, =VPBDIV
+        ldr     r1, =VPBDIV_VALUE
+        str     r1, [r0]
 /************************ Setup VPBDIV ********************************/
 
 /************** Setup MAM **************/
-		ldr		r0, =MAM_BASE
-		mov		r1, #MAMTIM_Val
-		str		r1, [r0, #MAMTIM_OFS]
-		mov		r1, #MAMCR_Val
-		str		r1, [r0, #MAMCR_OFS]
+        ldr     r0, =MAM_BASE
+        mov     r1, #MAMTIM_Val
+        str     r1, [r0, #MAMTIM_OFS]
+        mov     r1, #MAMCR_Val
+        str     r1, [r0, #MAMCR_OFS]
 /************** Setup MAM **************/
 
 /************************ setup stack *********************************/
     ldr   r0, .undefined_stack_top
-	sub   r0, r0, #4
+    sub   r0, r0, #4
     msr   CPSR_c, #MODE_UND|I_BIT|F_BIT /* Undefined Instruction Mode */
     mov   sp, r0
 
     ldr   r0, .abort_stack_top
-	sub   r0, r0, #4
+    sub   r0, r0, #4
     msr   CPSR_c, #MODE_ABT|I_BIT|F_BIT /* Abort Mode */
     mov   sp, r0
 
     ldr   r0, .fiq_stack_top
-	sub   r0, r0, #4
+    sub   r0, r0, #4
     msr   CPSR_c, #MODE_FIQ|I_BIT|F_BIT /* FIQ Mode */
     mov   sp, r0
 
     ldr   r0, .irq_stack_top
-	sub   r0, r0, #4
+    sub   r0, r0, #4
     msr   CPSR_c, #MODE_IRQ|I_BIT|F_BIT /* IRQ Mode */
     mov   sp, r0
 
     ldr   r0, .svc_stack_top
-	sub   r0, r0, #4
+    sub   r0, r0, #4
     msr   CPSR_c, #MODE_SVC|I_BIT|F_BIT  /* Supervisor Mode */
     mov   sp, r0
 /************************ setup stack ********************************/
@@ -152,7 +152,7 @@ data_loop:
 
     cmp     r3, r2                   /* check if data to clear */
     blo     data_loop                /* loop until done        */
-	
+
     /* clear .bss */
     mov     r0,#0                   /* get a zero */
     ldr     r1,=__bss_start         /* bss start  */
@@ -179,20 +179,20 @@ ctor_loop:
     b       ctor_loop
 ctor_end:
 
-	/* enter C code */
-	bl		main
-
-	.align 0
-	.undefined_stack_top:
-	.word   _undefined_stack_top
-	.abort_stack_top:
-	.word   _abort_stack_top
-	.fiq_stack_top:
-	.word   _fiq_stack_top
-	.irq_stack_top:
-	.word   _irq_stack_top
-	.svc_stack_top:
-	.word   _svc_stack_top
+    /* enter C code */
+    bl      main
+
+    .align 0
+    .undefined_stack_top:
+    .word   _undefined_stack_top
+    .abort_stack_top:
+    .word   _abort_stack_top
+    .fiq_stack_top:
+    .word   _fiq_stack_top
+    .irq_stack_top:
+    .word   _irq_stack_top
+    .svc_stack_top:
+    .word   _svc_stack_top
 /*********************** END Clear BSS  ******************************/
 
 .section .init,"ax"
@@ -201,116 +201,116 @@ ctor_end:
 .globl _start
 _start:
 
-	ldr   pc, __start					/* reset - _start			*/
-	ldr   pc, _undf						/* undefined - _undf		*/
-	ldr   pc, _swi			    		/* SWI - _swi				*/
-	ldr   pc, _pabt						/* program abort - _pabt	*/
-	ldr   pc, _dabt						/* data abort - _dabt		*/
-	.word 0xB8A06F58					/* reserved                 */
-	ldr   pc, __IRQ_Handler				/* IRQ - read the VIC		*/
-	ldr   pc, _fiq						/* FIQ - _fiq				*/
+    ldr   pc, __start                   /* reset - _start           */
+    ldr   pc, _undf                     /* undefined - _undf        */
+    ldr   pc, _swi                      /* SWI - _swi               */
+    ldr   pc, _pabt                     /* program abort - _pabt    */
+    ldr   pc, _dabt                     /* data abort - _dabt       */
+    .word 0xB8A06F58                    /* reserved                 */
+    ldr   pc, __IRQ_Handler             /* IRQ - read the VIC       */
+    ldr   pc, _fiq                      /* FIQ - _fiq               */
 
 __start:.word _reset
-_undf:  .word __undf                    /* undefined				*/
-_swi:   .word __swi                     /* SWI						*/
-_pabt:  .word __pabt                    /* program abort			*/
-_dabt:  .word __dabt                    /* data abort				*/
+_undf:  .word __undf                    /* undefined                */
+_swi:   .word __swi                     /* SWI                      */
+_pabt:  .word __pabt                    /* program abort            */
+_dabt:  .word __dabt                    /* data abort               */
 temp1:  .word 0
 __IRQ_Handler:  .word IRQ_Handler
-_fiq:   .word __fiq                     /* FIQ						*/
+_fiq:   .word __fiq                     /* FIQ                      */
 
-__undf: b     .                         /* undefined				*/
+__undf: b     .                         /* undefined                */
 __swi : b     .
-__pabt: b     .                         /* program abort			*/
-__dabt: b     .                         /* data abort				*/
-__fiq : b     .  					    /* FIQ						*/
+__pabt: b     .                         /* program abort            */
+__dabt: b     .                         /* data abort               */
+__fiq : b     .                         /* FIQ                      */
 
 /* IRQ入口 */
 IRQ_Handler :
-		stmfd	sp!, {r0-r12,lr} 			   /* 对R0 – R12,LR寄存器压栈      */
-		bl	rt_interrupt_enter	 			   /* 通知RT-Thread进入中断模式     */
-		bl	rt_hw_trap_irq		 			   /* 相应中断服务例程处理  	    */
-		bl	rt_interrupt_leave		           /* ; 通知RT-Thread要离开中断模式 */
-
-		/* 如果设置了rt_thread_switch_interrupt_flag,进行中断中的线程上下文处理 */
-		ldr	r0, =rt_thread_switch_interrupt_flag
-		ldr	r1, [r0]
-		cmp	r1, #1
-		beq	rt_hw_context_switch_interrupt_do  /* 中断中切换发生 */
-											   /* 如果跳转了,将不会回来 */
-		ldmfd	sp!, {r0-r12,lr}			   /* 恢复栈 */
-		subs	pc, lr, #4					   /* 从IRQ中返回 */
+        stmfd   sp!, {r0-r12,lr}               /* 对R0 – R12,LR寄存器压栈      */
+        bl  rt_interrupt_enter                 /* 通知RT-Thread进入中断模式     */
+        bl  rt_hw_trap_irq                     /* 相应中断服务例程处理        */
+        bl  rt_interrupt_leave                 /* ; 通知RT-Thread要离开中断模式 */
+
+        /* 如果设置了rt_thread_switch_interrupt_flag,进行中断中的线程上下文处理 */
+        ldr r0, =rt_thread_switch_interrupt_flag
+        ldr r1, [r0]
+        cmp r1, #1
+        beq rt_hw_context_switch_interrupt_do  /* 中断中切换发生 */
+                                               /* 如果跳转了,将不会回来 */
+        ldmfd   sp!, {r0-r12,lr}               /* 恢复栈 */
+        subs    pc, lr, #4                     /* 从IRQ中返回 */
 
 /*
 * void rt_hw_context_switch_interrupt_do(rt_base_t flag)
 * 中断结束后的上下文切换
 */
 rt_hw_context_switch_interrupt_do:
-				mov	r1,  #0				/* clear flag */
-										/* 清楚中断中切换标志 */
-				str	r1,  [r0]			/* */
-
-				ldmfd	sp!, {r0-r12,lr}/* reload saved registers */
-										/* 先恢复被中断线程的上下文 */
-				stmfd	sp!, {r0-r3}	/* save r0-r3 */
-										/* 对R0 – R3压栈,因为后面会用到 */
-				mov	r1,  sp				/* 把此处的栈值保存到R1 */
-				add	sp,  sp, #16		/* restore sp */
-										/* 恢复IRQ的栈,后面会跳出IRQ模式 */
-				sub	r2,  lr, #4			/* save old task's pc to r2 */
-										/* 保存切换出线程的PC到R2 */
-
-				mrs	r3,  spsr			/* disable interrupt 保存中断前的CPSR到R3寄存器 */
-										/* 获得SPSR寄存器值 */
-				orr	r0,  r3, #I_BIT|F_BIT
-				msr	spsr_c, r0			/*  关闭SPSR中的IRQ/FIQ中断 */
-
-				ldr	r0,  =.+8		    /* 把当前地址+8载入到R0寄存器中 switch to interrupted task's stack */
-				movs pc,  r0            /* 退出IRQ模式,由于SPSR被设置成关中断模式 */
-										/* 所以从IRQ返回后,中断并没有打开
-										; R0寄存器中的位置实际就是下一条指令,
-										; 即PC继续往下走
-										; 此时
-										; 模式已经换成中断前的SVC模式,
-										; SP寄存器也是SVC模式下的栈寄存器
-										; R1保存IRQ模式下的栈指针
-										; R2保存切换出线程的PC
-										; R3保存切换出线程的CPSR */
-				stmfd	sp!, {r2}		/* push old task's pc */
-										/* 保存切换出任务的PC */
-				stmfd	sp!, {r4-r12,lr}/* push old task's lr,r12-r4 */
-										/* 保存R4 – R12,LR寄存器 */
-				mov	r4,  r1				/* Special optimised code below */
-										/* R1保存有压栈R0 – R3处的栈位置 */
-				mov	r5,  r3				/* R3切换出线程的CPSR */
-				ldmfd	r4!, {r0-r3}	/* 恢复R0 – R3 */
-				stmfd	sp!, {r0-r3}	/* push old task's r3-r0 */
-										/*  R0 – R3压栈到切换出线程 */
-				stmfd	sp!, {r5}		/* push old task's psr */
-										/* 切换出线程CPSR压栈 */
-				mrs	r4,  spsr
-				stmfd	sp!, {r4}		/* push old task's spsr */
-										/* 切换出线程SPSR压栈 */
-
-				ldr	r4,  =rt_interrupt_from_thread
-				ldr	r5,  [r4]
-				str	sp,  [r5]			/* store sp in preempted tasks's TCB */
-										/* 保存切换出线程的SP指针 */
-
-				ldr	r6,  =rt_interrupt_to_thread
-				ldr	r6,  [r6]
-				ldr	sp,  [r6]			/* get new task's stack pointer */
-										/* 获得切换到线程的栈 */
-
-				ldmfd	sp!, {r4}		/* pop new task's spsr */
-										/* 恢复SPSR */
-				msr	SPSR_cxsf, r4
-				ldmfd	sp!, {r4}		/* pop new task's psr */
-										/* 恢复CPSR */
-				msr	CPSR_cxsf, r4
-
-				ldmfd	sp!, {r0-r12,lr,pc}	/* pop new task's r0-r12,lr & pc */
-											/* 恢复R0 – R12,LR及PC寄存器 */
+                mov r1,  #0             /* clear flag */
+                                        /* 清楚中断中切换标志 */
+                str r1,  [r0]           /* */
+
+                ldmfd   sp!, {r0-r12,lr}/* reload saved registers */
+                                        /* 先恢复被中断线程的上下文 */
+                stmfd   sp!, {r0-r3}    /* save r0-r3 */
+                                        /* 对R0 – R3压栈,因为后面会用到 */
+                mov r1,  sp             /* 把此处的栈值保存到R1 */
+                add sp,  sp, #16        /* restore sp */
+                                        /* 恢复IRQ的栈,后面会跳出IRQ模式 */
+                sub r2,  lr, #4         /* save old task's pc to r2 */
+                                        /* 保存切换出线程的PC到R2 */
+
+                mrs r3,  spsr           /* disable interrupt 保存中断前的CPSR到R3寄存器 */
+                                        /* 获得SPSR寄存器值 */
+                orr r0,  r3, #I_BIT|F_BIT
+                msr spsr_c, r0          /*  关闭SPSR中的IRQ/FIQ中断 */
+
+                ldr r0,  =.+8           /* 把当前地址+8载入到R0寄存器中 switch to interrupted task's stack */
+                movs pc,  r0            /* 退出IRQ模式,由于SPSR被设置成关中断模式 */
+                                        /* 所以从IRQ返回后,中断并没有打开
+                                        ; R0寄存器中的位置实际就是下一条指令,
+                                        ; 即PC继续往下走
+                                        ; 此时
+                                        ; 模式已经换成中断前的SVC模式,
+                                        ; SP寄存器也是SVC模式下的栈寄存器
+                                        ; R1保存IRQ模式下的栈指针
+                                        ; R2保存切换出线程的PC
+                                        ; R3保存切换出线程的CPSR */
+                stmfd   sp!, {r2}       /* push old task's pc */
+                                        /* 保存切换出任务的PC */
+                stmfd   sp!, {r4-r12,lr}/* push old task's lr,r12-r4 */
+                                        /* 保存R4 – R12,LR寄存器 */
+                mov r4,  r1             /* Special optimised code below */
+                                        /* R1保存有压栈R0 – R3处的栈位置 */
+                mov r5,  r3             /* R3切换出线程的CPSR */
+                ldmfd   r4!, {r0-r3}    /* 恢复R0 – R3 */
+                stmfd   sp!, {r0-r3}    /* push old task's r3-r0 */
+                                        /*  R0 – R3压栈到切换出线程 */
+                stmfd   sp!, {r5}       /* push old task's psr */
+                                        /* 切换出线程CPSR压栈 */
+                mrs r4,  spsr
+                stmfd   sp!, {r4}       /* push old task's spsr */
+                                        /* 切换出线程SPSR压栈 */
+
+                ldr r4,  =rt_interrupt_from_thread
+                ldr r5,  [r4]
+                str sp,  [r5]           /* store sp in preempted tasks's TCB */
+                                        /* 保存切换出线程的SP指针 */
+
+                ldr r6,  =rt_interrupt_to_thread
+                ldr r6,  [r6]
+                ldr sp,  [r6]           /* get new task's stack pointer */
+                                        /* 获得切换到线程的栈 */
+
+                ldmfd   sp!, {r4}       /* pop new task's spsr */
+                                        /* 恢复SPSR */
+                msr SPSR_cxsf, r4
+                ldmfd   sp!, {r4}       /* pop new task's psr */
+                                        /* 恢复CPSR */
+                msr CPSR_cxsf, r4
+
+                ldmfd   sp!, {r0-r12,lr,pc} /* pop new task's r0-r12,lr & pc */
+                                            /* 恢复R0 – R12,LR及PC寄存器 */
 
 /* 代码加密功能 */
 #if defined(CODE_PROTECTION)

+ 38 - 38
libcpu/arm/lpc24xx/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -13,25 +13,25 @@
  */
 /*@{*/
 
-#define NOINT			0xc0
+#define NOINT           0xc0
 
 /*
  * rt_base_t rt_hw_interrupt_disable();
  */
 .globl rt_hw_interrupt_disable
 rt_hw_interrupt_disable:
-	mrs r0, cpsr
-	orr r1, r0, #NOINT
-	msr cpsr_c, r1
-	mov pc, lr
+    mrs r0, cpsr
+    orr r1, r0, #NOINT
+    msr cpsr_c, r1
+    mov pc, lr
 
 /*
  * void rt_hw_interrupt_enable(rt_base_t level);
  */
 .globl rt_hw_interrupt_enable
 rt_hw_interrupt_enable:
-	msr cpsr, r0
-	mov pc, lr
+    msr cpsr, r0
+    mov pc, lr
 
 /*
  * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
@@ -40,23 +40,23 @@ rt_hw_interrupt_enable:
  */
 .globl rt_hw_context_switch
 rt_hw_context_switch:
-	stmfd	sp!, {lr}		@ push pc (lr should be pushed in place of PC)
-	stmfd	sp!, {r0-r12, lr}	@ push lr & register file
+    stmfd   sp!, {lr}       @ push pc (lr should be pushed in place of PC)
+    stmfd   sp!, {r0-r12, lr}   @ push lr & register file
 
-	mrs	r4, cpsr
-	stmfd	sp!, {r4}		@ push cpsr
-	mrs	r4, spsr
-	stmfd	sp!, {r4}		@ push spsr
+    mrs r4, cpsr
+    stmfd   sp!, {r4}       @ push cpsr
+    mrs r4, spsr
+    stmfd   sp!, {r4}       @ push spsr
 
-	str	sp, [r0]			@ store sp in preempted tasks TCB
-	ldr	sp, [r1]			@ get new task stack pointer
+    str sp, [r0]            @ store sp in preempted tasks TCB
+    ldr sp, [r1]            @ get new task stack pointer
 
-	ldmfd	sp!, {r4}		@ pop new task spsr
-	msr	spsr_cxsf, r4
-	ldmfd	sp!, {r4}		@ pop new task cpsr
-	msr	cpsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task spsr
+    msr spsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task cpsr
+    msr cpsr_cxsf, r4
 
-	ldmfd	sp!, {r0-r12, lr, pc}	@ pop new task r0-r12, lr & pc
+    ldmfd   sp!, {r0-r12, lr, pc}   @ pop new task r0-r12, lr & pc
 
 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
@@ -64,14 +64,14 @@ rt_hw_context_switch:
  */
 .globl rt_hw_context_switch_to
 rt_hw_context_switch_to:
-	ldr	sp, [r0]		@ get new task stack pointer
+    ldr sp, [r0]        @ get new task stack pointer
 
-	ldmfd	sp!, {r4}		@ pop new task spsr
-	msr	spsr_cxsf, r4
-	ldmfd	sp!, {r4}		@ pop new task cpsr
-	msr	cpsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task spsr
+    msr spsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task cpsr
+    msr cpsr_cxsf, r4
 
-	ldmfd	sp!, {r0-r12, lr, pc}	@ pop new task r0-r12, lr & pc
+    ldmfd   sp!, {r0-r12, lr, pc}   @ pop new task r0-r12, lr & pc
 
 /*
  * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
@@ -81,15 +81,15 @@ rt_hw_context_switch_to:
 .globl rt_interrupt_to_thread
 .globl rt_hw_context_switch_interrupt
 rt_hw_context_switch_interrupt:
-	ldr r2, =rt_thread_switch_interrupt_flag
-	ldr r3, [r2]
-	cmp r3, #1
-	beq _reswitch
-	mov r3, #1				@ set rt_thread_switch_interrupt_flag to 1
-	str r3, [r2]
-	ldr r2, =rt_interrupt_from_thread	@ set rt_interrupt_from_thread
-	str r0, [r2]
+    ldr r2, =rt_thread_switch_interrupt_flag
+    ldr r3, [r2]
+    cmp r3, #1
+    beq _reswitch
+    mov r3, #1              @ set rt_thread_switch_interrupt_flag to 1
+    str r3, [r2]
+    ldr r2, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
+    str r0, [r2]
 _reswitch:
-	ldr r2, =rt_interrupt_to_thread		@ set rt_interrupt_to_thread
-	str r1, [r2]
-	mov pc, lr
+    ldr r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
+    str r1, [r2]
+    mov pc, lr

+ 63 - 63
libcpu/arm/lpc24xx/context_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -9,99 +9,99 @@
 ; * 2011-07-22     Bernard      added thumb mode porting
 ; */
 
-NOINT	EQU		0xc0	; disable interrupt in psr
+NOINT   EQU     0xc0    ; disable interrupt in psr
 
-	AREA |.text|, CODE, READONLY, ALIGN=2
-	ARM
-	REQUIRE8
-	PRESERVE8
+    AREA |.text|, CODE, READONLY, ALIGN=2
+    ARM
+    REQUIRE8
+    PRESERVE8
 
 ;/*
 ; * rt_base_t rt_hw_interrupt_disable();
 ; */
-rt_hw_interrupt_disable	PROC
-	EXPORT rt_hw_interrupt_disable
-	MRS r0, cpsr
-	ORR r1, r0, #NOINT
-	MSR cpsr_c, r1
-	BX	lr
-	ENDP
+rt_hw_interrupt_disable PROC
+    EXPORT rt_hw_interrupt_disable
+    MRS r0, cpsr
+    ORR r1, r0, #NOINT
+    MSR cpsr_c, r1
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_interrupt_enable(rt_base_t level);
 ; */
-rt_hw_interrupt_enable	PROC
-	EXPORT rt_hw_interrupt_enable
-	MSR cpsr_c, r0
-	BX	lr
-	ENDP
+rt_hw_interrupt_enable  PROC
+    EXPORT rt_hw_interrupt_enable
+    MSR cpsr_c, r0
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 ; * r0 --> from
 ; * r1 --> to
 ; */
-rt_hw_context_switch	PROC
-	EXPORT rt_hw_context_switch
-	STMFD	sp!, {lr}			; push pc (lr should be pushed in place of PC)
-	STMFD	sp!, {r0-r12, lr}	; push lr & register file
+rt_hw_context_switch    PROC
+    EXPORT rt_hw_context_switch
+    STMFD   sp!, {lr}           ; push pc (lr should be pushed in place of PC)
+    STMFD   sp!, {r0-r12, lr}   ; push lr & register file
 
-	MRS		r4, cpsr
-	TST     lr, #0x01
-	BEQ     _ARM_MODE
-	ORR     r4, r4, #0x20       ; it's thumb code
+    MRS     r4, cpsr
+    TST     lr, #0x01
+    BEQ     _ARM_MODE
+    ORR     r4, r4, #0x20       ; it's thumb code
 
 _ARM_MODE
-	STMFD	sp!, {r4}			; push cpsr
+    STMFD   sp!, {r4}           ; push cpsr
 
-	STR	    sp, [r0]			; store sp in preempted tasks TCB
-	LDR	    sp, [r1]			; get new task stack pointer
+    STR     sp, [r0]            ; store sp in preempted tasks TCB
+    LDR     sp, [r1]            ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task cpsr to spsr
-	MSR     spsr_cxsf, r4
-	BIC     r4, r4, #0x20       ; must be ARM mode
-	MSR	    cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr to spsr
+    MSR     spsr_cxsf, r4
+    BIC     r4, r4, #0x20       ; must be ARM mode
+    MSR     cpsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}^	; pop new task r0-r12, lr & pc, copy spsr to cpsr
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}^  ; pop new task r0-r12, lr & pc, copy spsr to cpsr
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_to(rt_uint32 to);
 ; * r0 --> to
 ; */
-rt_hw_context_switch_to	PROC
-	EXPORT rt_hw_context_switch_to
-	LDR	sp, [r0]				; get new task stack pointer
+rt_hw_context_switch_to PROC
+    EXPORT rt_hw_context_switch_to
+    LDR sp, [r0]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task cpsr to spsr
-	MSR	    spsr_cxsf, r4
-	BIC     r4, r4, #0x20       ; must be ARM mode
-	MSR	    cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr to spsr
+    MSR     spsr_cxsf, r4
+    BIC     r4, r4, #0x20       ; must be ARM mode
+    MSR     cpsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}^	; pop new task r0-r12, lr & pc, copy spsr to cpsr
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}^  ; pop new task r0-r12, lr & pc, copy spsr to cpsr
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 ; */
-	IMPORT rt_thread_switch_interrupt_flag
-	IMPORT rt_interrupt_from_thread
-	IMPORT rt_interrupt_to_thread
-
-rt_hw_context_switch_interrupt	PROC
-	EXPORT rt_hw_context_switch_interrupt
-	LDR r2, =rt_thread_switch_interrupt_flag
-	LDR r3, [r2]
-	CMP r3, #1
-	BEQ _reswitch
-	MOV r3, #1							; set rt_thread_switch_interrupt_flag to 1
-	STR r3, [r2]
-	LDR r2, =rt_interrupt_from_thread	; set rt_interrupt_from_thread
-	STR r0, [r2]
+    IMPORT rt_thread_switch_interrupt_flag
+    IMPORT rt_interrupt_from_thread
+    IMPORT rt_interrupt_to_thread
+
+rt_hw_context_switch_interrupt  PROC
+    EXPORT rt_hw_context_switch_interrupt
+    LDR r2, =rt_thread_switch_interrupt_flag
+    LDR r3, [r2]
+    CMP r3, #1
+    BEQ _reswitch
+    MOV r3, #1                          ; set rt_thread_switch_interrupt_flag to 1
+    STR r3, [r2]
+    LDR r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
+    STR r0, [r2]
 _reswitch
-	LDR r2, =rt_interrupt_to_thread		; set rt_interrupt_to_thread
-	STR r1, [r2]
-	BX	lr
-	ENDP
+    LDR r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
+    STR r1, [r2]
+    BX  lr
+    ENDP
 
-	END
+    END

+ 17 - 17
libcpu/arm/lpc24xx/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -18,8 +18,8 @@
 #define PLLFEED        (0xE01FC000 + 0x08C)
 #define PLLSTAT        (0xE01FC000 + 0x088)
 #define CCLKCFG        (0xE01FC000 + 0x104)
-#define MEMMAP         (0xE01FC000 + 0x040)     
-#define SCS            (0xE01FC000 + 0x1A0)     
+#define MEMMAP         (0xE01FC000 + 0x040)
+#define SCS            (0xE01FC000 + 0x1A0)
 #define CLKSRCSEL      (0xE01FC000 + 0x10C)
 #define MAMCR          (0xE01FC000 + 0x000)
 #define MAMTIM         (0xE01FC000 + 0x004)
@@ -77,7 +77,7 @@ _vector_fiq:    .word vector_fiq
 .globl _rtthread_start
 _rtthread_start:
         .word _start
-        
+
 .globl _rtthread_end
 _rtthread_end:
         .word  _end
@@ -86,9 +86,9 @@ _rtthread_end:
  * rtthread bss start and end which are defined in linker script
  */
 .globl _bss_start
-_bss_start:     
+_bss_start:
         .word __bss_start
-        
+
 .globl _bss_end
 _bss_end:
         .word __bss_end
@@ -100,25 +100,25 @@ _bss_end:
 reset:
         /* enter svc mode */
         msr cpsr_c, #SVCMODE|NOINT
-        
+
         /*watch dog disable */
         ldr r0,=WDMOD
-        ldr r1,=0x0             
+        ldr r1,=0x0
         str r1,[r0]
-        
+
         /* all interrupt disable */
         ldr r0,=VICIntEnClr
         ldr r1,=0xffffffff
         str r1,[r0]
-        
+
         ldr     r1, =VICVectAddr
         ldr     r0, =0x00
         str     r0, [r1]
-        
+
         ldr     r1, =VICIntSelect
         ldr     r0, =0x00
-        str     r0, [r1]        
-        
+        str     r0, [r1]
+
         /* setup stack */
         bl              stack_setup
 
@@ -140,7 +140,7 @@ data_loop:
         mov     r0,#0                   /* get a zero */
         ldr     r1,=__bss_start         /* bss start  */
         ldr     r2,=__bss_end           /* bss end    */
-        
+
 bss_loop:
         cmp     r1,r2                   /* check if data to clear */
         strlo   r0,[r1],#4              /* clear 4 bytes          */
@@ -158,13 +158,13 @@ ctor_loop:
         mov     lr, pc
         bx      r2
         ldmfd   sp!, {r0-r1}
-        b       ctor_loop       
+        b       ctor_loop
 ctor_end:
 
         /* start RT-Thread Kernel               */
         ldr     pc, _rtthread_startup
 
-_rtthread_startup: 
+_rtthread_startup:
         .word rtthread_startup
 
         .equ USERMODE,  0x10
@@ -188,7 +188,7 @@ vector_resv:    bl rt_hw_trap_resv
 .globl rt_thread_switch_interrupt_flag
 .globl rt_interrupt_from_thread
 .globl rt_interrupt_to_thread
-vector_irq:     
+vector_irq:
         stmfd   sp!, {r0-r12,lr}
         bl      rt_interrupt_enter
         bl      rt_hw_trap_irq

+ 60 - 60
libcpu/arm/lpc24xx/start_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -20,22 +20,22 @@
 ;/*****************************************************************************/
 
 ;/*
-; *  The LPC2400.S code is executed after CPU Reset. This file may be 
-; *  translated with the following SET symbols. In uVision these SET 
+; *  The LPC2400.S code is executed after CPU Reset. This file may be
+; *  translated with the following SET symbols. In uVision these SET
 ; *  symbols are entered under Options - ASM - Define.
 ; *
-; *  NO_CLOCK_SETUP: when set the startup code will not initialize Clock 
-; *  (used mostly when clock is already initialized from script .ini 
+; *  NO_CLOCK_SETUP: when set the startup code will not initialize Clock
+; *  (used mostly when clock is already initialized from script .ini
 ; *  file).
 ; *
-; *  NO_EMC_SETUP: when set the startup code will not initialize 
+; *  NO_EMC_SETUP: when set the startup code will not initialize
 ; *  External Bus Controller.
 ; *
-; *  RAM_INTVEC: when set the startup code copies exception vectors 
+; *  RAM_INTVEC: when set the startup code copies exception vectors
 ; *  from on-chip Flash to on-chip RAM.
 ; *
-; *  REMAP: when set the startup code initializes the register MEMMAP 
-; *  which overwrites the settings of the CPU configuration pins. The 
+; *  REMAP: when set the startup code initializes the register MEMMAP
+; *  which overwrites the settings of the CPU configuration pins. The
 ; *  startup and interrupt vectors are remapped from:
 ; *     0x00000000  default setting (not remapped)
 ; *     0x40000000  when RAM_MODE is used
@@ -45,7 +45,7 @@
 ; *  from external memory starting at address 0x80000000.
 ; *
 ; *  RAM_MODE: when set the device is configured for code execution
-; *  from on-chip RAM starting at address 0x40000000. 
+; *  from on-chip RAM starting at address 0x40000000.
 ; */
 
 
@@ -64,7 +64,7 @@ F_Bit           EQU     0x40            ; when F bit is set, FIQ is disabled
 ;----------------------- Memory Definitions ------------------------------------
 
 ; Internal Memory Base Addresses
-FLASH_BASE      EQU     0x00000000   
+FLASH_BASE      EQU     0x00000000
 RAM_BASE        EQU     0x40000000
 EXTMEM_BASE     EQU     0x80000000
 
@@ -73,10 +73,10 @@ STA_MEM0_BASE   EQU     0x80000000
 STA_MEM1_BASE   EQU     0x81000000
 STA_MEM2_BASE   EQU     0x82000000
 STA_MEM3_BASE   EQU     0x83000000
-DYN_MEM0_BASE   EQU     0xA0000000   
-DYN_MEM1_BASE   EQU     0xB0000000   
-DYN_MEM2_BASE   EQU     0xC0000000   
-DYN_MEM3_BASE   EQU     0xD0000000   
+DYN_MEM0_BASE   EQU     0xA0000000
+DYN_MEM1_BASE   EQU     0xB0000000
+DYN_MEM2_BASE   EQU     0xC0000000
+DYN_MEM3_BASE   EQU     0xD0000000
 
 
 ;----------------------- Stack and Heap Definitions ----------------------------
@@ -516,7 +516,7 @@ EMC_DYN_RD_CFG_Val  EQU 0x00000001
 ;//       <h> Dynamic Memory Self-refresh Exit Time Register (EMCDynamictSREX)
 ;//         <o2.0..3> tSREX: Self-refresh exit time <1-16> <#-1>
 ;//           <i> The delay is in CCLK cycles
-;//           <i> This value is normally found in SDRAM data sheets as tSREX, 
+;//           <i> This value is normally found in SDRAM data sheets as tSREX,
 ;//           <i> for devices without this parameter you use the same value as tXSR
 ;//       </h>
 ;//       <h> Dynamic Memory Last Data Out to Active Time Register (EMCDynamictAPR)
@@ -799,7 +799,7 @@ EMC_STA_WWEN0_Val   EQU 0x00000002
 ;//           <i> The delay is in CCLK cycles
 ;//       </h>
 EMC_STA_WOEN0_Val   EQU 0x00000002
-                                      
+
 ;//       <h> Static Memory Read Delay Register (EMCStaticWaitRd0)
 ;//         <i> Selects the delay from CS0 to a read access
 ;//         <o.0..4> WAITRD: Non-page mode read wait states or asynchronous page mode read first access wait states <1-32> <#-1>
@@ -866,7 +866,7 @@ EMC_STA_WWEN1_Val   EQU 0x00000000
 ;//           <i> The delay is in CCLK cycles
 ;//       </h>
 EMC_STA_WOEN1_Val   EQU 0x00000000
-                                      
+
 ;//       <h> Static Memory Read Delay Register (EMCStaticWaitRd1)
 ;//         <i> Selects the delay from CS1 to a read access
 ;//         <o.0..4> WAITRD: Non-page mode read wait states or asynchronous page mode read first access wait states <1-32> <#-1>
@@ -933,7 +933,7 @@ EMC_STA_WWEN2_Val   EQU 0x00000000
 ;//           <i> The delay is in CCLK cycles
 ;//       </h>
 EMC_STA_WOEN2_Val   EQU 0x00000000
-                                      
+
 ;//       <h> Static Memory Read Delay Register (EMCStaticWaitRd2)
 ;//         <i> Selects the delay from CS2 to a read access
 ;//         <o.0..4> WAITRD: Non-page mode read wait states or asynchronous page mode read first access wait states <1-32> <#-1>
@@ -1000,7 +1000,7 @@ EMC_STA_WWEN3_Val   EQU 0x00000000
 ;//           <i> The delay is in CCLK cycles
 ;//       </h>
 EMC_STA_WOEN3_Val   EQU 0x00000000
-                                      
+
 ;//       <h> Static Memory Read Delay Register (EMCStaticWaitRd3)
 ;//         <i> Selects the delay from CS3 to a read access
 ;//         <o.0..4> WAITRD: Non-page mode read wait states or asynchronous page mode read first access wait states <1-32> <#-1>
@@ -1057,12 +1057,12 @@ EMC_STA_EXT_W_Val   EQU 0x00000000
 ;  Absolute addressing mode must be used.
 ;  Dummy Handlers are implemented as infinite loops which can be modified.
 
-Vectors         LDR     PC, Reset_Addr         
+Vectors         LDR     PC, Reset_Addr
                 LDR     PC, Undef_Addr
                 LDR     PC, SWI_Addr
                 LDR     PC, PAbt_Addr
                 LDR     PC, DAbt_Addr
-                NOP                            ; Reserved Vector 
+                NOP                            ; Reserved Vector
                 LDR     PC, IRQ_Addr
                 LDR     PC, FIQ_Addr
 
@@ -1071,7 +1071,7 @@ Undef_Addr      DCD     Undef_Handler
 SWI_Addr        DCD     SWI_Handler
 PAbt_Addr       DCD     PAbt_Handler
 DAbt_Addr       DCD     DAbt_Handler
-                DCD     0                      ; Reserved Address 
+                DCD     0                      ; Reserved Address
 IRQ_Addr        DCD     IRQ_Handler
 FIQ_Addr        DCD     FIQ_Handler
 
@@ -1085,32 +1085,32 @@ FIQ_Addr        DCD     FIQ_Handler
 
 ; Prepare Fatal Context
         MACRO
-		prepare_fatal
-		STMFD   sp!, {r0-r3}
-		MOV     r1, sp
-		ADD     sp, sp, #16
-		SUB     r2, lr, #4
-		MRS     r3, spsr
+        prepare_fatal
+        STMFD   sp!, {r0-r3}
+        MOV     r1, sp
+        ADD     sp, sp, #16
+        SUB     r2, lr, #4
+        MRS     r3, spsr
 
-		; switch to SVC mode and no interrupt
-		MSR     cpsr_c, #I_Bit :OR: F_Bit :OR: Mode_SVC
+        ; switch to SVC mode and no interrupt
+        MSR     cpsr_c, #I_Bit :OR: F_Bit :OR: Mode_SVC
 
-		STMFD   sp!, {r0}       ; old r0
-		; get sp
-		ADD     r0, sp, #4
-		STMFD   sp!, {r3}       ; cpsr
-		STMFD   sp!, {r2}       ; pc
-		STMFD   sp!, {lr}       ; lr
-		STMFD   sp!, {r0}       ; sp
-		STMFD   sp!, {r4-r12}
+        STMFD   sp!, {r0}       ; old r0
+        ; get sp
+        ADD     r0, sp, #4
+        STMFD   sp!, {r3}       ; cpsr
+        STMFD   sp!, {r2}       ; pc
+        STMFD   sp!, {lr}       ; lr
+        STMFD   sp!, {r0}       ; sp
+        STMFD   sp!, {r4-r12}
 
-		MOV     r4, r1
+        MOV     r4, r1
 
-		LDMFD   r4!, {r0-r3}
-		STMFD   sp!, {r0-r3}
+        LDMFD   r4!, {r0-r3}
+        STMFD   sp!, {r0-r3}
 
-		MOV     r0, sp
-		MEND
+        MOV     r0, sp
+        MEND
 
 Undef_Handler
         prepare_fatal
@@ -1123,7 +1123,7 @@ SWI_Handler
         B       .
 
 PAbt_Handler
-		prepare_fatal
+        prepare_fatal
         BL      rt_hw_trap_pabt
         B       .
 
@@ -1140,7 +1140,7 @@ FIQ_Handler
 ; Reset Handler
 
                 EXPORT  Reset_Handler
-Reset_Handler   
+Reset_Handler
 
 
 ; Clock Setup ------------------------------------------------------------------
@@ -1152,18 +1152,18 @@ Reset_Handler
 
 ;  Configure and Enable PLL
                 LDR     R3, =SCS_Val          ; Enable main oscillator
-                STR     R3, [R0, #SCS_OFS] 
+                STR     R3, [R0, #SCS_OFS]
 
-                IF      (SCS_Val:AND:OSCEN) != 0  
+                IF      (SCS_Val:AND:OSCEN) != 0
 OSC_Loop        LDR     R3, [R0, #SCS_OFS]    ; Wait for main osc stabilize
                 ANDS    R3, R3, #OSCSTAT
                 BEQ     OSC_Loop
                 ENDIF
 
                 LDR     R3, =CLKSRCSEL_Val    ; Select PLL source clock
-                STR     R3, [R0, #CLKSRCSEL_OFS] 
+                STR     R3, [R0, #CLKSRCSEL_OFS]
                 LDR     R3, =PLLCFG_Val
-                STR     R3, [R0, #PLLCFG_OFS] 
+                STR     R3, [R0, #PLLCFG_OFS]
                 STR     R1, [R0, #PLLFEED_OFS]
                 STR     R2, [R0, #PLLFEED_OFS]
                 MOV     R3, #PLLCON_PLLE
@@ -1217,9 +1217,9 @@ M_N_Lock        LDR     R3, [R0, #PLLSTAT_OFS]
                 IF      MAM_SETUP != 0
                 LDR     R0, =MAM_BASE
                 MOV     R1, #MAMTIM_Val
-                STR     R1, [R0, #MAMTIM_OFS] 
+                STR     R1, [R0, #MAMTIM_OFS]
                 MOV     R1, #MAMCR_Val
-                STR     R1, [R0, #MAMCR_OFS] 
+                STR     R1, [R0, #MAMCR_OFS]
                 ENDIF   ; MAM_SETUP
 
 
@@ -1325,7 +1325,7 @@ Wait_1          SUBS    R6, R6, #1                ; Delay ~200 ms proc clk 57.6
 
                 LDR     R4, =(PALL_CMD:OR:0x03)   ; Write Precharge All Command
                 STR     R4, [R0, #EMC_DYN_CTRL_OFS]
-  
+
                 MOV     R4, #2
                 STR     R4, [R0, #EMC_DYN_RFSH_OFS]
 
@@ -1338,7 +1338,7 @@ Wait_2          SUBS    R6, R6, #1                ; Delay
 
                 LDR     R4, =(MODE_CMD:OR:0x03)   ; Write MODE Command
                 STR     R4, [R0, #EMC_DYN_CTRL_OFS]
-    
+
                 ; Dummy read
                 IF      (EMC_DYNCS0_SETUP != 0)
                 LDR     R4, =DYN_MEM0_BASE
@@ -1476,7 +1476,7 @@ Wait_5          SUBS    R6, R6, #1                ; Delay ~10 ms @ proc clk 57.6
                 ADD     R5, R5, R0
                 STR     R4, [R5, #0]
 
-                ENDIF   ; EMC_STATIC_SETUP 
+                ENDIF   ; EMC_STATIC_SETUP
 
                 ENDIF   ; EMC_SETUP
 
@@ -1486,9 +1486,9 @@ Wait_5          SUBS    R6, R6, #1                ; Delay ~10 ms @ proc clk 57.6
                 IF      :DEF:RAM_INTVEC
                 ADR     R8, Vectors         ; Source
                 LDR     R9, =RAM_BASE       ; Destination
-                LDMIA   R8!, {R0-R7}        ; Load Vectors 
-                STMIA   R9!, {R0-R7}        ; Store Vectors 
-                LDMIA   R8!, {R0-R7}        ; Load Handler Addresses 
+                LDMIA   R8!, {R0-R7}        ; Load Vectors
+                STMIA   R9!, {R0-R7}        ; Store Vectors
+                LDMIA   R8!, {R0-R7}        ; Load Handler Addresses
                 STMIA   R9!, {R0-R7}        ; Store Handler Addresses
                 ENDIF
 
@@ -1610,8 +1610,8 @@ rt_hw_context_switch_interrupt_do   PROC
                 LDR     sp,  [r6]       ; get new task's stack pointer
 
                 LDMFD   sp!, {r4}       ; pop new task's cpsr to spsr
-				MSR     spsr_cxsf, r4
-				BIC     r4, r4, #0x20   ; must be ARM mode
+                MSR     spsr_cxsf, r4
+                BIC     r4, r4, #0x20   ; must be ARM mode
                 MSR     cpsr_cxsf, r4
 
                 LDMFD   sp!, {r0-r12,lr,pc}^ ; pop new task's r0-r12,lr & pc, copy spsr to cpsr

+ 10 - 10
libcpu/arm/realview-a8-vmm/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -245,16 +245,16 @@ switch_to_guest:
     ldr     r0, [r1]
     mcr     p15, 0, r0, c3, c0
 #endif
-	/* check whether there is a pending interrupt for Guest OS */
-	bl      vmm_virq_check
+    /* check whether there is a pending interrupt for Guest OS */
+    bl      vmm_virq_check
 
 #ifdef RT_VMM_USING_DOMAIN
     @ All done, restore the guest domain.
     mcr     p15, 0, r5, c3, c0
 #endif
 
-	cmp     r0, #0x0
-	beq     route_irq_to_guest
+    cmp     r0, #0x0
+    beq     route_irq_to_guest
 
     ldmfd   sp!, {r0-r12,lr}
     subs    pc, lr, #4
@@ -318,35 +318,35 @@ rt_hw_context_switch_interrupt_do:
 .endm
 
     .align  5
-    .globl	vector_swi
+    .globl  vector_swi
 vector_swi:
     push_svc_reg
     bl      rt_hw_trap_swi
     b       .
 
     .align  5
-    .globl	vector_undef
+    .globl  vector_undef
 vector_undef:
     push_svc_reg
     bl      rt_hw_trap_undef
     b       .
 
     .align  5
-    .globl	vector_pabt
+    .globl  vector_pabt
 vector_pabt:
     push_svc_reg
     bl      rt_hw_trap_pabt
     b       .
 
     .align  5
-    .globl	vector_dabt
+    .globl  vector_dabt
 vector_dabt:
     push_svc_reg
     bl      rt_hw_trap_dabt
     b       .
 
     .align  5
-    .globl	vector_resv
+    .globl  vector_resv
 vector_resv:
     push_svc_reg
     bl      rt_hw_trap_resv

+ 2 - 2
libcpu/arm/realview-a8-vmm/vector_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -49,4 +49,4 @@ _vector_irq:
 _vector_fiq:
     .word vector_fiq
 
-.balignl 	16,0xdeadbeef
+.balignl    16,0xdeadbeef

+ 38 - 38
libcpu/arm/s3c24x0/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -13,25 +13,25 @@
  */
 /*@{*/
 
-#define NOINT			0xc0
+#define NOINT           0xc0
 
 /*
  * rt_base_t rt_hw_interrupt_disable();
  */
 .globl rt_hw_interrupt_disable
 rt_hw_interrupt_disable:
-	mrs r0, cpsr
-	orr r1, r0, #NOINT
-	msr cpsr_c, r1
-	mov pc, lr
+    mrs r0, cpsr
+    orr r1, r0, #NOINT
+    msr cpsr_c, r1
+    mov pc, lr
 
 /*
  * void rt_hw_interrupt_enable(rt_base_t level);
  */
 .globl rt_hw_interrupt_enable
 rt_hw_interrupt_enable:
-	msr cpsr, r0
-	mov pc, lr
+    msr cpsr, r0
+    mov pc, lr
 
 /*
  * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
@@ -40,23 +40,23 @@ rt_hw_interrupt_enable:
  */
 .globl rt_hw_context_switch
 rt_hw_context_switch:
-	stmfd	sp!, {lr}		@ push pc (lr should be pushed in place of PC)
-	stmfd	sp!, {r0-r12, lr}	@ push lr & register file
+    stmfd   sp!, {lr}       @ push pc (lr should be pushed in place of PC)
+    stmfd   sp!, {r0-r12, lr}   @ push lr & register file
 
-	mrs	r4, cpsr
-	stmfd	sp!, {r4}		@ push cpsr
-	mrs	r4, spsr
-	stmfd	sp!, {r4}		@ push spsr
+    mrs r4, cpsr
+    stmfd   sp!, {r4}       @ push cpsr
+    mrs r4, spsr
+    stmfd   sp!, {r4}       @ push spsr
 
-	str	sp, [r0]		@ store sp in preempted tasks TCB
-	ldr	sp, [r1]		@ get new task stack pointer
+    str sp, [r0]        @ store sp in preempted tasks TCB
+    ldr sp, [r1]        @ get new task stack pointer
 
-	ldmfd	sp!, {r4}		@ pop new task spsr
-	msr	spsr_cxsf, r4
-	ldmfd	sp!, {r4}		@ pop new task cpsr
-	msr	spsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task spsr
+    msr spsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task cpsr
+    msr spsr_cxsf, r4
 
-	ldmfd	sp!, {r0-r12, lr, pc}^	@ pop new task r0-r12, lr & pc
+    ldmfd   sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc
 
 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
@@ -64,14 +64,14 @@ rt_hw_context_switch:
  */
 .globl rt_hw_context_switch_to
 rt_hw_context_switch_to:
-	ldr	sp, [r0]		@ get new task stack pointer
+    ldr sp, [r0]        @ get new task stack pointer
 
-	ldmfd	sp!, {r4}		@ pop new task spsr
-	msr	spsr_cxsf, r4
-	ldmfd	sp!, {r4}		@ pop new task cpsr
-	msr	cpsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task spsr
+    msr spsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task cpsr
+    msr cpsr_cxsf, r4
 
-	ldmfd	sp!, {r0-r12, lr, pc}	@ pop new task r0-r12, lr & pc
+    ldmfd   sp!, {r0-r12, lr, pc}   @ pop new task r0-r12, lr & pc
 
 /*
  * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
@@ -81,15 +81,15 @@ rt_hw_context_switch_to:
 .globl rt_interrupt_to_thread
 .globl rt_hw_context_switch_interrupt
 rt_hw_context_switch_interrupt:
-	ldr r2, =rt_thread_switch_interrupt_flag
-	ldr r3, [r2]
-	cmp r3, #1
-	beq _reswitch
-	mov r3, #1				@ set rt_thread_switch_interrupt_flag to 1
-	str r3, [r2]
-	ldr r2, =rt_interrupt_from_thread	@ set rt_interrupt_from_thread
-	str r0, [r2]
+    ldr r2, =rt_thread_switch_interrupt_flag
+    ldr r3, [r2]
+    cmp r3, #1
+    beq _reswitch
+    mov r3, #1              @ set rt_thread_switch_interrupt_flag to 1
+    str r3, [r2]
+    ldr r2, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
+    str r0, [r2]
 _reswitch:
-	ldr r2, =rt_interrupt_to_thread		@ set rt_interrupt_to_thread
-	str r1, [r2]
-	mov pc, lr
+    ldr r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
+    str r1, [r2]
+    mov pc, lr

+ 61 - 61
libcpu/arm/s3c24x0/context_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -8,96 +8,96 @@
 ; * 2009-01-20     Bernard      first version
 ; */
 
-NOINT	EQU		0xc0	; disable interrupt in psr
+NOINT   EQU     0xc0    ; disable interrupt in psr
 
-	AREA |.text|, CODE, READONLY, ALIGN=2
-	ARM
-	REQUIRE8
-	PRESERVE8
+    AREA |.text|, CODE, READONLY, ALIGN=2
+    ARM
+    REQUIRE8
+    PRESERVE8
 
 ;/*
 ; * rt_base_t rt_hw_interrupt_disable();
 ; */
-rt_hw_interrupt_disable	PROC
-	EXPORT rt_hw_interrupt_disable
-	MRS r0, cpsr
-	ORR r1, r0, #NOINT
-	MSR cpsr_c, r1
-	BX	lr
-	ENDP
+rt_hw_interrupt_disable PROC
+    EXPORT rt_hw_interrupt_disable
+    MRS r0, cpsr
+    ORR r1, r0, #NOINT
+    MSR cpsr_c, r1
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_interrupt_enable(rt_base_t level);
 ; */
-rt_hw_interrupt_enable	PROC
-	EXPORT rt_hw_interrupt_enable
-	MSR cpsr_c, r0
-	BX	lr
-	ENDP
+rt_hw_interrupt_enable  PROC
+    EXPORT rt_hw_interrupt_enable
+    MSR cpsr_c, r0
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 ; * r0 --> from
 ; * r1 --> to
 ; */
-rt_hw_context_switch	PROC
-	EXPORT rt_hw_context_switch
-	STMFD	sp!, {lr}			; push pc (lr should be pushed in place of PC)
-	STMFD	sp!, {r0-r12, lr}	; push lr & register file
+rt_hw_context_switch    PROC
+    EXPORT rt_hw_context_switch
+    STMFD   sp!, {lr}           ; push pc (lr should be pushed in place of PC)
+    STMFD   sp!, {r0-r12, lr}   ; push lr & register file
 
-	MRS		r4, cpsr
-	STMFD	sp!, {r4}			; push cpsr
-	MRS		r4, spsr
-	STMFD	sp!, {r4}			; push spsr
+    MRS     r4, cpsr
+    STMFD   sp!, {r4}           ; push cpsr
+    MRS     r4, spsr
+    STMFD   sp!, {r4}           ; push spsr
 
-	STR	sp, [r0]				; store sp in preempted tasks TCB
-	LDR	sp, [r1]				; get new task stack pointer
+    STR sp, [r0]                ; store sp in preempted tasks TCB
+    LDR sp, [r1]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task spsr
-	MSR	spsr_cxsf, r4
-	LDMFD	sp!, {r4}			; pop new task cpsr
-	MSR	spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR spsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}^	; pop new task r0-r12, lr & pc
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}^  ; pop new task r0-r12, lr & pc
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_to(rt_uint32 to);
 ; * r0 --> to
 ; */
-rt_hw_context_switch_to	PROC
-	EXPORT rt_hw_context_switch_to
-	LDR	sp, [r0]				; get new task stack pointer
+rt_hw_context_switch_to PROC
+    EXPORT rt_hw_context_switch_to
+    LDR sp, [r0]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task spsr
-	MSR	spsr_cxsf, r4
-	LDMFD	sp!, {r4}			; pop new task cpsr
-	MSR	cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR cpsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}   ; pop new task r0-r12, lr & pc
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 ; */
-	IMPORT rt_thread_switch_interrupt_flag
-	IMPORT rt_interrupt_from_thread
-	IMPORT rt_interrupt_to_thread
+    IMPORT rt_thread_switch_interrupt_flag
+    IMPORT rt_interrupt_from_thread
+    IMPORT rt_interrupt_to_thread
 
-rt_hw_context_switch_interrupt	PROC
-	EXPORT rt_hw_context_switch_interrupt
-	LDR r2, =rt_thread_switch_interrupt_flag
-	LDR r3, [r2]
-	CMP r3, #1
-	BEQ _reswitch
-	MOV r3, #1							; set rt_thread_switch_interrupt_flag to 1
-	STR r3, [r2]
-	LDR r2, =rt_interrupt_from_thread	; set rt_interrupt_from_thread
-	STR r0, [r2]
+rt_hw_context_switch_interrupt  PROC
+    EXPORT rt_hw_context_switch_interrupt
+    LDR r2, =rt_thread_switch_interrupt_flag
+    LDR r3, [r2]
+    CMP r3, #1
+    BEQ _reswitch
+    MOV r3, #1                          ; set rt_thread_switch_interrupt_flag to 1
+    STR r3, [r2]
+    LDR r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
+    STR r0, [r2]
 _reswitch
-	LDR r2, =rt_interrupt_to_thread		; set rt_interrupt_to_thread
-	STR r1, [r2]
-	BX	lr
-	ENDP
+    LDR r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
+    STR r1, [r2]
+    BX  lr
+    ENDP
 
-	END
+    END

+ 273 - 273
libcpu/arm/s3c24x0/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -10,66 +10,66 @@
  * 2008-01-29     Yi.Qiu       for QEMU emulator
  */
 
-#define CONFIG_STACKSIZE 	512
-#define S_FRAME_SIZE 		72
-
-#define S_OLD_R0 			68
-#define S_PSR  				64
-#define S_PC  				60
-#define S_LR  				56
-#define S_SP  				52
-
-#define S_IP  				48
-#define S_FP  				44
-#define S_R10  				40
-#define S_R9  				36
-#define S_R8  				32
-#define S_R7  				28
-#define S_R6  				24
-#define S_R5  				20
-#define S_R4  				16
-#define S_R3  				12
-#define S_R2  				8
-#define S_R1  				4
-#define S_R0 				0
-
-.equ 	USERMODE, 			0x10
-.equ 	FIQMODE,			0x11
-.equ 	IRQMODE,			0x12
-.equ 	SVCMODE,			0x13
-.equ 	ABORTMODE,			0x17
-.equ 	UNDEFMODE,			0x1b
-.equ 	MODEMASK,			0x1f
-.equ 	NOINT,				0xc0
-
-.equ 	RAM_BASE,			0x00000000	/*Start address of RAM		*/
-.equ 	ROM_BASE,			0x30000000	/*Start address of Flash	*/
-
-.equ 	MPLLCON,			0x4c000004	/*Mpll control register		*/
-.equ 	M_MDIV,				0x20
-.equ 	M_PDIV,				0x4
-.equ 	M_SDIV,				0x2
-
-.equ 	INTMSK,				0x4a000008
-.equ 	INTSUBMSK, 			0x4a00001c
-.equ 	WTCON, 				0x53000000
-.equ 	LOCKTIME,			0x4c000000
-.equ 	CLKDIVN,			0x4c000014	/*Clock divider control		*/
-.equ 	GPHCON,				0x56000070	/*Port H control			*/
-.equ 	GPHUP,				0x56000078	/*Pull-up control H			*/
-.equ 	BWSCON,				0x48000000	/*Bus width & wait status	*/
-.equ 	BANKCON0,			0x48000004	/*Boot ROM control			*/
-.equ 	BANKCON1,			0x48000008	/*BANK1 control				*/
-.equ 	BANKCON2,			0x4800000c	/*BANK2 cControl			*/
-.equ 	BANKCON3,			0x48000010	/*BANK3 control				*/
-.equ 	BANKCON4,			0x48000014	/*BANK4 control				*/
-.equ 	BANKCON5,			0x48000018	/*BANK5 control				*/
-.equ 	BANKCON6,			0x4800001c	/*BANK6 control				*/
-.equ 	BANKCON7,			0x48000020	/*BANK7 control				*/
-.equ 	REFRESH,			0x48000024	/*DRAM/SDRAM efresh			*/
-.equ 	BANKSIZE,			0x48000028	/*Flexible Bank Size		*/
-.equ 	MRSRB6,				0x4800002c	/*Mode egister set for SDRAM*/
-.equ 	MRSRB7,				0x48000030	/*Mode egister set for SDRAM*/
+#define CONFIG_STACKSIZE    512
+#define S_FRAME_SIZE        72
+
+#define S_OLD_R0            68
+#define S_PSR               64
+#define S_PC                60
+#define S_LR                56
+#define S_SP                52
+
+#define S_IP                48
+#define S_FP                44
+#define S_R10               40
+#define S_R9                36
+#define S_R8                32
+#define S_R7                28
+#define S_R6                24
+#define S_R5                20
+#define S_R4                16
+#define S_R3                12
+#define S_R2                8
+#define S_R1                4
+#define S_R0                0
+
+.equ    USERMODE,           0x10
+.equ    FIQMODE,            0x11
+.equ    IRQMODE,            0x12
+.equ    SVCMODE,            0x13
+.equ    ABORTMODE,          0x17
+.equ    UNDEFMODE,          0x1b
+.equ    MODEMASK,           0x1f
+.equ    NOINT,              0xc0
+
+.equ    RAM_BASE,           0x00000000  /*Start address of RAM      */
+.equ    ROM_BASE,           0x30000000  /*Start address of Flash    */
+
+.equ    MPLLCON,            0x4c000004  /*Mpll control register     */
+.equ    M_MDIV,             0x20
+.equ    M_PDIV,             0x4
+.equ    M_SDIV,             0x2
+
+.equ    INTMSK,             0x4a000008
+.equ    INTSUBMSK,          0x4a00001c
+.equ    WTCON,              0x53000000
+.equ    LOCKTIME,           0x4c000000
+.equ    CLKDIVN,            0x4c000014  /*Clock divider control     */
+.equ    GPHCON,             0x56000070  /*Port H control            */
+.equ    GPHUP,              0x56000078  /*Pull-up control H         */
+.equ    BWSCON,             0x48000000  /*Bus width & wait status   */
+.equ    BANKCON0,           0x48000004  /*Boot ROM control          */
+.equ    BANKCON1,           0x48000008  /*BANK1 control             */
+.equ    BANKCON2,           0x4800000c  /*BANK2 cControl            */
+.equ    BANKCON3,           0x48000010  /*BANK3 control             */
+.equ    BANKCON4,           0x48000014  /*BANK4 control             */
+.equ    BANKCON5,           0x48000018  /*BANK5 control             */
+.equ    BANKCON6,           0x4800001c  /*BANK6 control             */
+.equ    BANKCON7,           0x48000020  /*BANK7 control             */
+.equ    REFRESH,            0x48000024  /*DRAM/SDRAM efresh         */
+.equ    BANKSIZE,           0x48000028  /*Flexible Bank Size        */
+.equ    MRSRB6,             0x4800002c  /*Mode egister set for SDRAM*/
+.equ    MRSRB7,             0x48000030  /*Mode egister set for SDRAM*/
 
 /*
  *************************************************************************
@@ -84,24 +84,24 @@
 
 .globl _start
 _start:
-	b		reset
-	ldr		pc, _vector_undef
-	ldr		pc, _vector_swi
-	ldr		pc, _vector_pabt
-	ldr		pc, _vector_dabt
-	ldr		pc, _vector_resv
-	ldr		pc, _vector_irq
-	ldr		pc, _vector_fiq
-
-_vector_undef:	.word vector_undef
-_vector_swi:	.word vector_swi
-_vector_pabt:	.word vector_pabt
-_vector_dabt:	.word vector_dabt
-_vector_resv:	.word vector_resv
-_vector_irq:	.word vector_irq
-_vector_fiq:	.word vector_fiq
-
-.balignl 	16,0xdeadbeef
+    b       reset
+    ldr     pc, _vector_undef
+    ldr     pc, _vector_swi
+    ldr     pc, _vector_pabt
+    ldr     pc, _vector_dabt
+    ldr     pc, _vector_resv
+    ldr     pc, _vector_irq
+    ldr     pc, _vector_fiq
+
+_vector_undef:  .word vector_undef
+_vector_swi:    .word vector_swi
+_vector_pabt:   .word vector_pabt
+_vector_dabt:   .word vector_dabt
+_vector_resv:   .word vector_resv
+_vector_irq:    .word vector_irq
+_vector_fiq:    .word vector_fiq
+
+.balignl    16,0xdeadbeef
 
 /*
  *************************************************************************
@@ -115,7 +115,7 @@ _vector_fiq:	.word vector_fiq
  */
 
 _TEXT_BASE:
-	.word	TEXT_BASE
+    .word   TEXT_BASE
 
 /*
  * rtthread kernel start and end
@@ -123,117 +123,117 @@ _TEXT_BASE:
  */
 .globl _rtthread_start
 _rtthread_start:
-	.word _start
-	
+    .word _start
+
 .globl _rtthread_end
 _rtthread_end:
-	.word  _end
+    .word  _end
 
 /*
  * rtthread bss start and end which are defined in linker script
  */
 .globl _bss_start
-_bss_start:	
-	.word __bss_start
-	
+_bss_start:
+    .word __bss_start
+
 .globl _bss_end
 _bss_end:
-	.word __bss_end
+    .word __bss_end
 
-/* IRQ stack memory (calculated at run-time) 						*/
+/* IRQ stack memory (calculated at run-time)                        */
 .globl IRQ_STACK_START
 IRQ_STACK_START:
-	.word _irq_stack_start + 1024
-	
+    .word _irq_stack_start + 1024
+
 .globl FIQ_STACK_START
 FIQ_STACK_START:
-	.word _fiq_stack_start + 1024
-	
+    .word _fiq_stack_start + 1024
+
 .globl UNDEFINED_STACK_START
 UNDEFINED_STACK_START:
-	.word _undefined_stack_start + CONFIG_STACKSIZE
-	
+    .word _undefined_stack_start + CONFIG_STACKSIZE
+
 .globl ABORT_STACK_START
 ABORT_STACK_START:
-	.word _abort_stack_start + CONFIG_STACKSIZE
-	
+    .word _abort_stack_start + CONFIG_STACKSIZE
+
 .globl _STACK_START
 _STACK_START:
-	.word _svc_stack_start + 4096
+    .word _svc_stack_start + 4096
 
 /* ----------------------------------entry------------------------------*/
 reset:
-	
-	/* set the cpu to SVC32 mode 	*/
-	mrs		r0,cpsr
-	bic		r0,r0,#MODEMASK
-	orr		r0,r0,#SVCMODE
-	msr		cpsr,r0
-	
-	/* watch dog disable 			*/
-	ldr 	r0,=WTCON
-	ldr 	r1,=0x0
-	str 	r1,[r0]
-
-	/* mask all IRQs by clearing all bits in the INTMRs 				*/
-	ldr		r1, =INTMSK
-	ldr		r0, =0xffffffff
-	str		r0, [r1]
-	ldr		r1, =INTSUBMSK
-	ldr		r0, =0x7fff				/*all sub interrupt disable			*/
-	str		r0, [r1]
-
-	/* set interrupt vector 		*/
-	ldr 	r0, _load_address
-	mov		r1, #0x0				/* target address    				*/
-	add		r2, r0, #0x20			/* size, 32bytes         			*/
+
+    /* set the cpu to SVC32 mode    */
+    mrs     r0,cpsr
+    bic     r0,r0,#MODEMASK
+    orr     r0,r0,#SVCMODE
+    msr     cpsr,r0
+
+    /* watch dog disable            */
+    ldr     r0,=WTCON
+    ldr     r1,=0x0
+    str     r1,[r0]
+
+    /* mask all IRQs by clearing all bits in the INTMRs                 */
+    ldr     r1, =INTMSK
+    ldr     r0, =0xffffffff
+    str     r0, [r1]
+    ldr     r1, =INTSUBMSK
+    ldr     r0, =0x7fff             /*all sub interrupt disable         */
+    str     r0, [r1]
+
+    /* set interrupt vector         */
+    ldr     r0, _load_address
+    mov     r1, #0x0                /* target address                   */
+    add     r2, r0, #0x20           /* size, 32bytes                    */
 
 copy_loop:
-	ldmia	r0!, {r3-r10}			/* copy from source address [r0]    */
-	stmia	r1!, {r3-r10}			/* copy to   target address [r1]    */
-	cmp		r0, r2			/* until source end address [r2]    */
-	ble		copy_loop
-
-	/* setup stack */
-	bl		stack_setup
-
-	/* clear .bss */
-	mov   	r0,#0                   /* get a zero 						*/
-	ldr   	r1,=__bss_start         /* bss start 						*/
-	ldr   	r2,=__bss_end           /* bss end 							*/
-	
+    ldmia   r0!, {r3-r10}           /* copy from source address [r0]    */
+    stmia   r1!, {r3-r10}           /* copy to   target address [r1]    */
+    cmp     r0, r2          /* until source end address [r2]    */
+    ble     copy_loop
+
+    /* setup stack */
+    bl      stack_setup
+
+    /* clear .bss */
+    mov     r0,#0                   /* get a zero                       */
+    ldr     r1,=__bss_start         /* bss start                        */
+    ldr     r2,=__bss_end           /* bss end                          */
+
 bss_loop:
-	cmp   	r1,r2                   /* check if data to clear 			*/
-	strlo 	r0,[r1],#4              /* clear 4 bytes 					*/
-	blo   	bss_loop                /* loop until done 					*/
-
-	/* call C++ constructors of global objects 							*/
-	ldr 	r0, =__ctors_start__
-	ldr 	r1, =__ctors_end__
-	
+    cmp     r1,r2                   /* check if data to clear           */
+    strlo   r0,[r1],#4              /* clear 4 bytes                    */
+    blo     bss_loop                /* loop until done                  */
+
+    /* call C++ constructors of global objects                          */
+    ldr     r0, =__ctors_start__
+    ldr     r1, =__ctors_end__
+
 ctor_loop:
-	cmp 	r0, r1
-	beq 	ctor_end
-	ldr 	r2, [r0], #4
-	stmfd 	sp!, {r0-r1}
-	mov 	lr, pc
-	bx 		r2
-	ldmfd 	sp!, {r0-r1}
-	b		ctor_loop
-	
+    cmp     r0, r1
+    beq     ctor_end
+    ldr     r2, [r0], #4
+    stmfd   sp!, {r0-r1}
+    mov     lr, pc
+    bx      r2
+    ldmfd   sp!, {r0-r1}
+    b       ctor_loop
+
 ctor_end:
 
-	/* start RT-Thread Kernel 		*/
-	ldr		pc, _rtthread_startup
+    /* start RT-Thread Kernel       */
+    ldr     pc, _rtthread_startup
 
-_rtthread_startup: 
-	.word rtthread_startup
+_rtthread_startup:
+    .word rtthread_startup
 #if defined (__FLASH_BUILD__)
-_load_address: 
-	.word ROM_BASE + _TEXT_BASE
+_load_address:
+    .word ROM_BASE + _TEXT_BASE
 #else
-_load_address: 
-	.word RAM_BASE + _TEXT_BASE
+_load_address:
+    .word RAM_BASE + _TEXT_BASE
 #endif
 
 /*
@@ -244,143 +244,143 @@ _load_address:
  *************************************************************************
  */
 
-/* exception handlers 				*/
-	.align  5
+/* exception handlers               */
+    .align  5
 vector_undef:
-	sub 	sp, sp, #S_FRAME_SIZE
-	stmia 	sp, {r0 - r12}   		/* Calling r0-r12					*/
-	add		r8, sp, #S_PC
-	stmdb   r8, {sp, lr}^           /* Calling SP, LR					*/
-	str		lr, [r8, #0]            /* Save calling PC					*/
-	mrs		r6, spsr
-	str		r6, [r8, #4]            /* Save CPSR						*/
-	str		r0, [r8, #8]            /* Save OLD_R0						*/
-	mov		r0, sp
-
-	bl		rt_hw_trap_udef
-
-	.align	5
+    sub     sp, sp, #S_FRAME_SIZE
+    stmia   sp, {r0 - r12}          /* Calling r0-r12                   */
+    add     r8, sp, #S_PC
+    stmdb   r8, {sp, lr}^           /* Calling SP, LR                   */
+    str     lr, [r8, #0]            /* Save calling PC                  */
+    mrs     r6, spsr
+    str     r6, [r8, #4]            /* Save CPSR                        */
+    str     r0, [r8, #8]            /* Save OLD_R0                      */
+    mov     r0, sp
+
+    bl      rt_hw_trap_udef
+
+    .align  5
 vector_swi:
-	bl 		rt_hw_trap_swi
+    bl      rt_hw_trap_swi
 
-	.align	5
+    .align  5
 vector_pabt:
-	bl 		rt_hw_trap_pabt
+    bl      rt_hw_trap_pabt
 
-	.align	5
+    .align  5
 vector_dabt:
-	sub 	sp, sp, #S_FRAME_SIZE
-	stmia 	sp, {r0 - r12}   		/* Calling r0-r12					*/
-	add		r8, sp, #S_PC
-	stmdb   r8, {sp, lr}^           /* Calling SP, LR					*/
-	str		lr, [r8, #0]            /* Save calling PC					*/
-	mrs		r6, spsr
-	str		r6, [r8, #4]            /* Save CPSR						*/
-	str		r0, [r8, #8]            /* Save OLD_R0						*/
-	mov		r0, sp
-
-	bl 		rt_hw_trap_dabt
-
-	.align	5
+    sub     sp, sp, #S_FRAME_SIZE
+    stmia   sp, {r0 - r12}          /* Calling r0-r12                   */
+    add     r8, sp, #S_PC
+    stmdb   r8, {sp, lr}^           /* Calling SP, LR                   */
+    str     lr, [r8, #0]            /* Save calling PC                  */
+    mrs     r6, spsr
+    str     r6, [r8, #4]            /* Save CPSR                        */
+    str     r0, [r8, #8]            /* Save OLD_R0                      */
+    mov     r0, sp
+
+    bl      rt_hw_trap_dabt
+
+    .align  5
 vector_resv:
-	bl 		rt_hw_trap_resv
+    bl      rt_hw_trap_resv
 
-.globl 		rt_interrupt_enter
-.globl 		rt_interrupt_leave
-.globl 		rt_thread_switch_interrupt_flag
-.globl 		rt_interrupt_from_thread
-.globl 		rt_interrupt_to_thread
+.globl      rt_interrupt_enter
+.globl      rt_interrupt_leave
+.globl      rt_thread_switch_interrupt_flag
+.globl      rt_interrupt_from_thread
+.globl      rt_interrupt_to_thread
 vector_irq:
-	stmfd	sp!, {r0-r12,lr}
-	bl		rt_interrupt_enter
-	bl		rt_hw_trap_irq
-	bl		rt_interrupt_leave
+    stmfd   sp!, {r0-r12,lr}
+    bl      rt_interrupt_enter
+    bl      rt_hw_trap_irq
+    bl      rt_interrupt_leave
 
-	/* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
-	ldr		r0, =rt_thread_switch_interrupt_flag
-	ldr		r1, [r0]
-	cmp		r1, #1
-	beq		_interrupt_thread_switch
+    /* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
+    ldr     r0, =rt_thread_switch_interrupt_flag
+    ldr     r1, [r0]
+    cmp     r1, #1
+    beq     _interrupt_thread_switch
 
-	ldmfd	sp!, {r0-r12,lr}
-	subs	pc, lr, #4
+    ldmfd   sp!, {r0-r12,lr}
+    subs    pc, lr, #4
 
-	.align	5
+    .align  5
 vector_fiq:
-	stmfd	sp!,{r0-r7,lr}
-	bl 		rt_hw_trap_fiq
-	ldmfd	sp!,{r0-r7,lr}
-	subs	pc,lr,#4
+    stmfd   sp!,{r0-r7,lr}
+    bl      rt_hw_trap_fiq
+    ldmfd   sp!,{r0-r7,lr}
+    subs    pc,lr,#4
 
 _interrupt_thread_switch:
-	mov		r1,  #0					/* clear rt_thread_switch_interrupt_flag*/
-	str		r1,  [r0]
-
-	ldmfd	sp!, {r0-r12,lr}		/* reload saved registers			*/
-	stmfd	sp!, {r0-r3}			/* save r0-r3						*/
-	mov		r1,  sp
-	add		sp,  sp, #16			/* restore sp						*/
-	sub		r2,  lr, #4				/* save old task's pc to r2			*/
-	
-	mrs		r3,  spsr				/* disable interrupt				*/
-	orr		r0,  r3, #NOINT
-	msr		spsr_c, r0
-
-	ldr		r0,  =.+8				/* switch to interrupted task's stack*/
-	movs	pc,  r0
-
-	stmfd	sp!, {r2}				/* push old task's pc				*/
-	stmfd	sp!, {r4-r12,lr}		/* push old task's lr,r12-r4		*/
-	mov		r4,  r1					/* Special optimised code below		*/
-	mov		r5,  r3
-	ldmfd	r4!, {r0-r3}
-	stmfd	sp!, {r0-r3}			/* push old task's r3-r0			*/
-	stmfd	sp!, {r5}				/* push old task's psr				*/
-	mrs		r4,  spsr
-	stmfd	sp!, {r4}				/* push old task's spsr				*/
-
-	ldr		r4,  =rt_interrupt_from_thread
-	ldr		r5,  [r4]
-	str		sp,  [r5]				/* store sp in preempted tasks's TCB*/
-
-	ldr	r6,  =rt_interrupt_to_thread
-	ldr	r6,  [r6]
-	ldr	sp,  [r6]					/* get new task's stack pointer		*/
-
-	ldmfd	sp!, {r4}				/* pop new task's spsr				*/
-	msr		SPSR_cxsf, r4
-	ldmfd	sp!, {r4}				/* pop new task's psr				*/
-	msr		CPSR_cxsf, r4
-
-	ldmfd	sp!, {r0-r12,lr,pc}		/* pop new task's r0-r12,lr & pc	*/
+    mov     r1,  #0                 /* clear rt_thread_switch_interrupt_flag*/
+    str     r1,  [r0]
+
+    ldmfd   sp!, {r0-r12,lr}        /* reload saved registers           */
+    stmfd   sp!, {r0-r3}            /* save r0-r3                       */
+    mov     r1,  sp
+    add     sp,  sp, #16            /* restore sp                       */
+    sub     r2,  lr, #4             /* save old task's pc to r2         */
+
+    mrs     r3,  spsr               /* disable interrupt                */
+    orr     r0,  r3, #NOINT
+    msr     spsr_c, r0
+
+    ldr     r0,  =.+8               /* switch to interrupted task's stack*/
+    movs    pc,  r0
+
+    stmfd   sp!, {r2}               /* push old task's pc               */
+    stmfd   sp!, {r4-r12,lr}        /* push old task's lr,r12-r4        */
+    mov     r4,  r1                 /* Special optimised code below     */
+    mov     r5,  r3
+    ldmfd   r4!, {r0-r3}
+    stmfd   sp!, {r0-r3}            /* push old task's r3-r0            */
+    stmfd   sp!, {r5}               /* push old task's psr              */
+    mrs     r4,  spsr
+    stmfd   sp!, {r4}               /* push old task's spsr             */
+
+    ldr     r4,  =rt_interrupt_from_thread
+    ldr     r5,  [r4]
+    str     sp,  [r5]               /* store sp in preempted tasks's TCB*/
+
+    ldr r6,  =rt_interrupt_to_thread
+    ldr r6,  [r6]
+    ldr sp,  [r6]                   /* get new task's stack pointer     */
+
+    ldmfd   sp!, {r4}               /* pop new task's spsr              */
+    msr     SPSR_cxsf, r4
+    ldmfd   sp!, {r4}               /* pop new task's psr               */
+    msr     CPSR_cxsf, r4
+
+    ldmfd   sp!, {r0-r12,lr,pc}     /* pop new task's r0-r12,lr & pc    */
 
 stack_setup:
-	mrs		r0, cpsr
-	bic		r0, r0, #MODEMASK
-	orr		r1, r0, #UNDEFMODE|NOINT
-	msr		cpsr_cxsf, r1			/* undef mode						*/
-	ldr		sp, UNDEFINED_STACK_START
+    mrs     r0, cpsr
+    bic     r0, r0, #MODEMASK
+    orr     r1, r0, #UNDEFMODE|NOINT
+    msr     cpsr_cxsf, r1           /* undef mode                       */
+    ldr     sp, UNDEFINED_STACK_START
 
-	orr		r1,r0,#ABORTMODE|NOINT
-	msr		cpsr_cxsf,r1			/* abort mode						*/
-	ldr		sp, ABORT_STACK_START
+    orr     r1,r0,#ABORTMODE|NOINT
+    msr     cpsr_cxsf,r1            /* abort mode                       */
+    ldr     sp, ABORT_STACK_START
 
-	orr		r1,r0,#IRQMODE|NOINT
-	msr		cpsr_cxsf,r1			/* IRQ mode							*/
-	ldr		sp, IRQ_STACK_START
+    orr     r1,r0,#IRQMODE|NOINT
+    msr     cpsr_cxsf,r1            /* IRQ mode                         */
+    ldr     sp, IRQ_STACK_START
 
-	orr		r1,r0,#FIQMODE|NOINT
-	msr		cpsr_cxsf,r1			/* FIQ mode							*/
-	ldr		sp, FIQ_STACK_START
+    orr     r1,r0,#FIQMODE|NOINT
+    msr     cpsr_cxsf,r1            /* FIQ mode                         */
+    ldr     sp, FIQ_STACK_START
 
-	bic		r0,r0,#MODEMASK
-	orr		r1,r0,#SVCMODE|NOINT
-	msr		cpsr_cxsf,r1			/* SVC mode							*/
+    bic     r0,r0,#MODEMASK
+    orr     r1,r0,#SVCMODE|NOINT
+    msr     cpsr_cxsf,r1            /* SVC mode                         */
 
-	ldr		sp, _STACK_START
+    ldr     sp, _STACK_START
 
-	/* USER mode is not initialized. */
-	mov		pc,lr					/* The LR register may be not valid for the mode changes.*/
+    /* USER mode is not initialized. */
+    mov     pc,lr                   /* The LR register may be not valid for the mode changes.*/
 
 /*/*}*/
 

+ 13 - 13
libcpu/arm/s3c24x0/start_rvds.S

@@ -843,14 +843,14 @@ GPJUP_Val       EQU     0x00000000
 ;  Absolute addressing mode must be used.
 ;  Dummy Handlers are implemented as infinite loops which can be modified.
 
-				EXPORT Entry_Point
+                EXPORT Entry_Point
 Entry_Point
 Vectors         LDR     PC, Reset_Addr
                 LDR     PC, Undef_Addr
                 LDR     PC, SWI_Addr
                 LDR     PC, PAbt_Addr
                 LDR     PC, DAbt_Addr
-		NOP
+        NOP
                 LDR     PC, IRQ_Addr
                 LDR     PC, FIQ_Addr
 
@@ -880,17 +880,17 @@ FIQ_Handler     B       FIQ_Handler
 DAbt_Handler
                IMPORT rt_hw_trap_dabt
 
-                	sub 	sp, sp, #72
-	                stmia 	sp, {r0 - r12}   		;/* Calling r0-r12					*/
-                    add		r8, sp, #60
-	                stmdb   r8, {sp, lr}            ;/* Calling SP, LR					*/
-	                str		lr, [r8, #0]            ;/* Save calling PC					*/
-	                mrs		r6, spsr
-	                str		r6, [r8, #4]            ;/* Save CPSR						*/
-	                str		r0, [r8, #8]            ;/* Save OLD_R0						*/
-	                mov		r0, sp
+                    sub     sp, sp, #72
+                    stmia   sp, {r0 - r12}          ;/* Calling r0-r12                  */
+                    add     r8, sp, #60
+                    stmdb   r8, {sp, lr}            ;/* Calling SP, LR                  */
+                    str     lr, [r8, #0]            ;/* Save calling PC                 */
+                    mrs     r6, spsr
+                    str     r6, [r8, #4]            ;/* Save CPSR                       */
+                    str     r0, [r8, #8]            ;/* Save OLD_R0                     */
+                    mov     r0, sp
 
-	                bl 		rt_hw_trap_dabt
+                    bl      rt_hw_trap_dabt
 
 
 ;##########################################
@@ -1137,7 +1137,7 @@ rt_hw_context_switch_interrupt_do   PROC
                 MRS     r3,  spsr       ; get cpsr of interrupt thread
 
                 ; switch to SVC mode and no interrupt
-				MSR 	cpsr_c, #I_Bit:OR:F_Bit:OR:Mode_SVC
+                MSR     cpsr_c, #I_Bit:OR:F_Bit:OR:Mode_SVC
 
                 STMFD   sp!, {r2}       ; push old task's pc
                 STMFD   sp!, {r4-r12,lr}; push old task's lr,r12-r4

+ 38 - 38
libcpu/arm/s3c44b0/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -13,25 +13,25 @@
  */
 /*@{*/
 
-#define NOINT			0xc0
+#define NOINT           0xc0
 
 /*
  * rt_base_t rt_hw_interrupt_disable();
  */
 .globl rt_hw_interrupt_disable
 rt_hw_interrupt_disable:
-	mrs r0, cpsr
-	orr r1, r0, #NOINT
-	msr cpsr_c, r1
-	mov pc, lr
+    mrs r0, cpsr
+    orr r1, r0, #NOINT
+    msr cpsr_c, r1
+    mov pc, lr
 
 /*
  * void rt_hw_interrupt_enable(rt_base_t level);
  */
 .globl rt_hw_interrupt_enable
 rt_hw_interrupt_enable:
-	msr cpsr, r0
-	mov pc, lr
+    msr cpsr, r0
+    mov pc, lr
 
 /*
  * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
@@ -40,23 +40,23 @@ rt_hw_interrupt_enable:
  */
 .globl rt_hw_context_switch
 rt_hw_context_switch:
-	stmfd	sp!, {lr}		@ push pc (lr should be pushed in place of PC)
-	stmfd	sp!, {r0-r12, lr}	@ push lr & register file
+    stmfd   sp!, {lr}       @ push pc (lr should be pushed in place of PC)
+    stmfd   sp!, {r0-r12, lr}   @ push lr & register file
 
-	mrs	r4, cpsr
-	stmfd	sp!, {r4}		@ push cpsr
-	mrs	r4, spsr
-	stmfd	sp!, {r4}		@ push spsr
+    mrs r4, cpsr
+    stmfd   sp!, {r4}       @ push cpsr
+    mrs r4, spsr
+    stmfd   sp!, {r4}       @ push spsr
 
-	str	sp, [r0]			@ store sp in preempted tasks TCB
-	ldr	sp, [r1]			@ get new task stack pointer
+    str sp, [r0]            @ store sp in preempted tasks TCB
+    ldr sp, [r1]            @ get new task stack pointer
 
-	ldmfd	sp!, {r4}		@ pop new task spsr
-	msr	spsr_cxsf, r4
-	ldmfd	sp!, {r4}		@ pop new task cpsr
-	msr	cpsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task spsr
+    msr spsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task cpsr
+    msr cpsr_cxsf, r4
 
-	ldmfd	sp!, {r0-r12, lr, pc}	@ pop new task r0-r12, lr & pc
+    ldmfd   sp!, {r0-r12, lr, pc}   @ pop new task r0-r12, lr & pc
 
 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
@@ -64,14 +64,14 @@ rt_hw_context_switch:
  */
 .globl rt_hw_context_switch_to
 rt_hw_context_switch_to:
-	ldr	sp, [r0]		@ get new task stack pointer
+    ldr sp, [r0]        @ get new task stack pointer
 
-	ldmfd	sp!, {r4}		@ pop new task spsr
-	msr	spsr_cxsf, r4
-	ldmfd	sp!, {r4}		@ pop new task cpsr
-	msr	cpsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task spsr
+    msr spsr_cxsf, r4
+    ldmfd   sp!, {r4}       @ pop new task cpsr
+    msr cpsr_cxsf, r4
 
-	ldmfd	sp!, {r0-r12, lr, pc}	@ pop new task r0-r12, lr & pc
+    ldmfd   sp!, {r0-r12, lr, pc}   @ pop new task r0-r12, lr & pc
 
 /*
  * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
@@ -81,15 +81,15 @@ rt_hw_context_switch_to:
 .globl rt_interrupt_to_thread
 .globl rt_hw_context_switch_interrupt
 rt_hw_context_switch_interrupt:
-	ldr r2, =rt_thread_switch_interrupt_flag
-	ldr r3, [r2]
-	cmp r3, #1
-	beq _reswitch
-	mov r3, #1				@ set rt_thread_switch_interrupt_flag to 1
-	str r3, [r2]
-	ldr r2, =rt_interrupt_from_thread	@ set rt_interrupt_from_thread
-	str r0, [r2]
+    ldr r2, =rt_thread_switch_interrupt_flag
+    ldr r3, [r2]
+    cmp r3, #1
+    beq _reswitch
+    mov r3, #1              @ set rt_thread_switch_interrupt_flag to 1
+    str r3, [r2]
+    ldr r2, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
+    str r0, [r2]
 _reswitch:
-	ldr r2, =rt_interrupt_to_thread		@ set rt_interrupt_to_thread
-	str r1, [r2]
-	mov pc, lr
+    ldr r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
+    str r1, [r2]
+    mov pc, lr

+ 61 - 61
libcpu/arm/s3c44b0/context_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -8,96 +8,96 @@
 ; * 2009-01-20     Bernard      first version
 ; */
 
-NOINT	EQU		0xc0	; disable interrupt in psr
+NOINT   EQU     0xc0    ; disable interrupt in psr
 
-	AREA |.text|, CODE, READONLY, ALIGN=2
-	ARM
-	REQUIRE8
-	PRESERVE8
+    AREA |.text|, CODE, READONLY, ALIGN=2
+    ARM
+    REQUIRE8
+    PRESERVE8
 
 ;/*
 ; * rt_base_t rt_hw_interrupt_disable();
 ; */
-rt_hw_interrupt_disable	PROC
-	EXPORT rt_hw_interrupt_disable
-	MRS r0, cpsr
-	ORR r1, r0, #NOINT
-	MSR cpsr_c, r1
-	BX	lr
-	ENDP
+rt_hw_interrupt_disable PROC
+    EXPORT rt_hw_interrupt_disable
+    MRS r0, cpsr
+    ORR r1, r0, #NOINT
+    MSR cpsr_c, r1
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_interrupt_enable(rt_base_t level);
 ; */
-rt_hw_interrupt_enable	PROC
-	EXPORT rt_hw_interrupt_enable
-	MSR cpsr_c, r0
-	BX	lr
-	ENDP
+rt_hw_interrupt_enable  PROC
+    EXPORT rt_hw_interrupt_enable
+    MSR cpsr_c, r0
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 ; * r0 --> from
 ; * r1 --> to
 ; */
-rt_hw_context_switch	PROC
-	EXPORT rt_hw_context_switch
-	STMFD	sp!, {lr}			; push pc (lr should be pushed in place of PC)
-	STMFD	sp!, {r0-r12, lr}	; push lr & register file
+rt_hw_context_switch    PROC
+    EXPORT rt_hw_context_switch
+    STMFD   sp!, {lr}           ; push pc (lr should be pushed in place of PC)
+    STMFD   sp!, {r0-r12, lr}   ; push lr & register file
 
-	MRS		r4, cpsr
-	STMFD	sp!, {r4}			; push cpsr
-	MRS		r4, spsr
-	STMFD	sp!, {r4}			; push spsr
+    MRS     r4, cpsr
+    STMFD   sp!, {r4}           ; push cpsr
+    MRS     r4, spsr
+    STMFD   sp!, {r4}           ; push spsr
 
-	STR	sp, [r0]				; store sp in preempted tasks TCB
-	LDR	sp, [r1]				; get new task stack pointer
+    STR sp, [r0]                ; store sp in preempted tasks TCB
+    LDR sp, [r1]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task spsr
-	MSR	spsr_cxsf, r4
-	LDMFD	sp!, {r4}			; pop new task cpsr
-	MSR	cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR cpsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}   ; pop new task r0-r12, lr & pc
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_to(rt_uint32 to);
 ; * r0 --> to
 ; */
-rt_hw_context_switch_to	PROC
-	EXPORT rt_hw_context_switch_to
-	LDR	sp, [r0]				; get new task stack pointer
+rt_hw_context_switch_to PROC
+    EXPORT rt_hw_context_switch_to
+    LDR sp, [r0]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task spsr
-	MSR	spsr_cxsf, r4
-	LDMFD	sp!, {r4}			; pop new task cpsr
-	MSR	cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR cpsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}   ; pop new task r0-r12, lr & pc
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 ; */
-	IMPORT rt_thread_switch_interrupt_flag
-	IMPORT rt_interrupt_from_thread
-	IMPORT rt_interrupt_to_thread
+    IMPORT rt_thread_switch_interrupt_flag
+    IMPORT rt_interrupt_from_thread
+    IMPORT rt_interrupt_to_thread
 
-rt_hw_context_switch_interrupt	PROC
-	EXPORT rt_hw_context_switch_interrupt
-	LDR r2, =rt_thread_switch_interrupt_flag
-	LDR r3, [r2]
-	CMP r3, #1
-	BEQ _reswitch
-	MOV r3, #1							; set rt_thread_switch_interrupt_flag to 1
-	STR r3, [r2]
-	LDR r2, =rt_interrupt_from_thread	; set rt_interrupt_from_thread
-	STR r0, [r2]
+rt_hw_context_switch_interrupt  PROC
+    EXPORT rt_hw_context_switch_interrupt
+    LDR r2, =rt_thread_switch_interrupt_flag
+    LDR r3, [r2]
+    CMP r3, #1
+    BEQ _reswitch
+    MOV r3, #1                          ; set rt_thread_switch_interrupt_flag to 1
+    STR r3, [r2]
+    LDR r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
+    STR r0, [r2]
 _reswitch
-	LDR r2, =rt_interrupt_to_thread		; set rt_interrupt_to_thread
-	STR r1, [r2]
-	BX	lr
-	ENDP
+    LDR r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
+    STR r1, [r2]
+    BX  lr
+    ENDP
 
-	END
+    END

+ 165 - 165
libcpu/arm/s3c44b0/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -18,22 +18,22 @@
 .code 32
 .globl _start
 _start:
-	b reset
-	ldr	pc, _vector_undef
-	ldr	pc, _vector_swi
-	ldr	pc, _vector_pabt
-	ldr	pc, _vector_dabt
-	ldr	pc, _vector_resv
-	ldr	pc, _vector_irq
-	ldr	pc, _vector_fiq
-
-_vector_undef:	.word vector_undef
-_vector_swi:	.word vector_swi
-_vector_pabt:	.word vector_pabt
-_vector_dabt:	.word vector_dabt
-_vector_resv:	.word vector_resv
-_vector_irq:	.word vector_irq
-_vector_fiq:	.word vector_fiq
+    b reset
+    ldr pc, _vector_undef
+    ldr pc, _vector_swi
+    ldr pc, _vector_pabt
+    ldr pc, _vector_dabt
+    ldr pc, _vector_resv
+    ldr pc, _vector_irq
+    ldr pc, _vector_fiq
+
+_vector_undef:  .word vector_undef
+_vector_swi:    .word vector_swi
+_vector_pabt:   .word vector_pabt
+_vector_dabt:   .word vector_dabt
+_vector_resv:   .word vector_resv
+_vector_irq:    .word vector_irq
+_vector_fiq:    .word vector_fiq
 
 .text
 .code 32
@@ -45,209 +45,209 @@ _vector_fiq:	.word vector_fiq
 .globl _rtthread_start
 _rtthread_start:.word _start
 .globl _rtthread_end
-_rtthread_end:	.word  _end
+_rtthread_end:  .word  _end
 
 /*
  * rtthread bss start and end
  * which are defined in linker script
  */
 .globl _bss_start
-_bss_start:	.word __bss_start
+_bss_start: .word __bss_start
 .globl _bss_end
-_bss_end:	.word __bss_end
+_bss_end:   .word __bss_end
 
 #if defined(__FLASH_BUILD__)
 /*
- * TEXT_BASE, 
+ * TEXT_BASE,
  * which is defined in macro of make
  */
-_TEXT_BASE: .word	TEXT_BASE
+_TEXT_BASE: .word   TEXT_BASE
 #endif
 
-	.equ WTCON,		0x1d30000
-	.equ INTCON,	0x1e00000
-	.equ INTMSK, 	0x1e0000c
+    .equ WTCON,     0x1d30000
+    .equ INTCON,    0x1e00000
+    .equ INTMSK,    0x1e0000c
 
 /* the system entry */
 reset:
-	/* enter svc mode */
-	msr cpsr_c, #SVCMODE|NOINT
+    /* enter svc mode */
+    msr cpsr_c, #SVCMODE|NOINT
 
-	/*watch dog disable */
-	ldr r0,=WTCON
-    ldr r1,=0x0 		
+    /*watch dog disable */
+    ldr r0,=WTCON
+    ldr r1,=0x0
     str r1,[r0]
-	
-	/* all interrupt disable */
-	ldr r0,=INTMSK
-	ldr r1,=0x07ffffff
-	str r1,[r0]
-	
-	ldr	r1, =INTCON
-	ldr	r0, =0x05
-	str	r0, [r1]
+
+    /* all interrupt disable */
+    ldr r0,=INTMSK
+    ldr r1,=0x07ffffff
+    str r1,[r0]
+
+    ldr r1, =INTCON
+    ldr r0, =0x05
+    str r0, [r1]
 
 #if defined(__FLASH_BUILD__)
-	/* init lowlevel */
-	bl lowlevel_init
+    /* init lowlevel */
+    bl lowlevel_init
 #endif
 
-	/* setup stack */
-	bl stack_setup
-	
+    /* setup stack */
+    bl stack_setup
+
 #if defined(__FLASH_BUILD__)
-	mov r0, #0x0			/* r0 <- flash base address         */
-	ldr r1, _TEXT_BASE		/* r1 <- the taget address          */
-	
-	ldr	r2, _rtthread_start
-	ldr	r3, _bss_start
-	sub	r2, r3, r2			/* r2 <- size of rtthread kernel    */
-	add	r2, r0, r2			/* r2 <- source end address         */
-	
+    mov r0, #0x0            /* r0 <- flash base address         */
+    ldr r1, _TEXT_BASE      /* r1 <- the taget address          */
+
+    ldr r2, _rtthread_start
+    ldr r3, _bss_start
+    sub r2, r3, r2          /* r2 <- size of rtthread kernel    */
+    add r2, r0, r2          /* r2 <- source end address         */
+
 copy_loop:
-	ldmia	r0!, {r3-r10}	/* copy from source address [r0]    */
-	stmia	r1!, {r3-r10}	/* copy to   target address [r1]    */
-	cmp	r0, r2		/* until source end address [r2]    */
-	ble	copy_loop
+    ldmia   r0!, {r3-r10}   /* copy from source address [r0]    */
+    stmia   r1!, {r3-r10}   /* copy to   target address [r1]    */
+    cmp r0, r2      /* until source end address [r2]    */
+    ble copy_loop
 #endif
-	
-	/* start RT-Thread Kernel */
-	ldr	pc, _rtthread_startup
+
+    /* start RT-Thread Kernel */
+    ldr pc, _rtthread_startup
 
 _rtthread_startup: .word rtthread_startup
 
-	.equ USERMODE, 	0x10
-	.equ FIQMODE, 	0x11
-	.equ IRQMODE, 	0x12
-	.equ SVCMODE, 	0x13
-	.equ ABORTMODE, 0x17
-	.equ UNDEFMODE, 0x1b
-	.equ MODEMASK, 	0x1f
-	.equ NOINT,		0xc0
+    .equ USERMODE,  0x10
+    .equ FIQMODE,   0x11
+    .equ IRQMODE,   0x12
+    .equ SVCMODE,   0x13
+    .equ ABORTMODE, 0x17
+    .equ UNDEFMODE, 0x1b
+    .equ MODEMASK,  0x1f
+    .equ NOINT,     0xc0
 
 /* exception handlers */
-vector_undef:	bl rt_hw_trap_udef
-vector_swi:		bl rt_hw_trap_swi
-vector_pabt: 	bl rt_hw_trap_pabt
-vector_dabt:	bl rt_hw_trap_dabt
-vector_resv: 	bl rt_hw_trap_resv
+vector_undef:   bl rt_hw_trap_udef
+vector_swi:     bl rt_hw_trap_swi
+vector_pabt:    bl rt_hw_trap_pabt
+vector_dabt:    bl rt_hw_trap_dabt
+vector_resv:    bl rt_hw_trap_resv
 
 .globl rt_interrupt_enter
 .globl rt_interrupt_leave
 .globl rt_thread_switch_interrupt_flag
 .globl rt_interrupt_from_thread
 .globl rt_interrupt_to_thread
-vector_irq:	
-	stmfd	sp!, {r0-r12,lr}
-	bl  led_off
-	bl	rt_interrupt_enter
-	bl	rt_hw_trap_irq
-	bl	rt_interrupt_leave
-
-	/* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
-	ldr	r0, =rt_thread_switch_interrupt_flag
-	ldr	r1, [r0]
-	cmp	r1, #1
-	beq	_interrupt_thread_switch
-
-	ldmfd	sp!, {r0-r12,lr}
-	subs	pc, lr, #4
-
-	.align	5
+vector_irq:
+    stmfd   sp!, {r0-r12,lr}
+    bl  led_off
+    bl  rt_interrupt_enter
+    bl  rt_hw_trap_irq
+    bl  rt_interrupt_leave
+
+    /* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
+    ldr r0, =rt_thread_switch_interrupt_flag
+    ldr r1, [r0]
+    cmp r1, #1
+    beq _interrupt_thread_switch
+
+    ldmfd   sp!, {r0-r12,lr}
+    subs    pc, lr, #4
+
+    .align  5
 vector_fiq:
-	stmfd sp!,{r0-r7,lr}
-	bl rt_hw_trap_fiq
-	ldmfd sp!,{r0-r7,lr}
-	subs pc,lr,#4
+    stmfd sp!,{r0-r7,lr}
+    bl rt_hw_trap_fiq
+    ldmfd sp!,{r0-r7,lr}
+    subs pc,lr,#4
 
 _interrupt_thread_switch:
-	mov	r1, #0				@ clear rt_thread_switch_interrupt_flag
-	str	r1, [r0]
-
-	ldmfd sp!, {r0-r12,lr}	@ reload saved registers
-	stmfd sp!, {r0-r3}		@ save r0-r3
-	mov	r1, sp
-	add	sp, sp, #16			@ restore sp
-	sub	r2, lr, #4			@ save old task's pc to r2
-
-	mrs	r3, spsr			@ disable interrupt
-	orr	r0, r3, #NOINT
-	msr	spsr_c, r0
-
-	ldr	r0,  =.+8			@ switch to interrupted task's stack
-	movs pc, r0
-
-	stmfd sp!, {r2}			@ push old task's pc
-	stmfd sp!, {r4-r12,lr}	@ push old task's lr,r12-r4
-	mov	r4, r1				@ Special optimised code below
-	mov	r5, r3
-	ldmfd r4!, {r0-r3}
-	stmfd sp!, {r0-r3}		@ push old task's r3-r0
-	stmfd sp!, {r5}			@ push old task's psr
-	mrs	r4, spsr
-	stmfd sp!, {r4}			@ push old task's spsr
-
-	ldr	r4, =rt_interrupt_from_thread
-	ldr	r5, [r4]
-	str	sp, [r5]			@ store sp in preempted tasks's TCB
-
-	ldr	r6, =rt_interrupt_to_thread
-	ldr	r6, [r6]
-	ldr	sp, [r6]			@ get new task's stack pointer
-
-	ldmfd sp!, {r4}			@ pop new task's spsr
-	msr	SPSR_cxsf, r4
-	ldmfd sp!, {r4}			@ pop new task's psr
-	msr CPSR_cxsf, r4
-
-	ldmfd sp!, {r0-r12,lr,pc}	@ pop new task's r0-r12,lr & pc
+    mov r1, #0              @ clear rt_thread_switch_interrupt_flag
+    str r1, [r0]
+
+    ldmfd sp!, {r0-r12,lr}  @ reload saved registers
+    stmfd sp!, {r0-r3}      @ save r0-r3
+    mov r1, sp
+    add sp, sp, #16         @ restore sp
+    sub r2, lr, #4          @ save old task's pc to r2
+
+    mrs r3, spsr            @ disable interrupt
+    orr r0, r3, #NOINT
+    msr spsr_c, r0
+
+    ldr r0,  =.+8           @ switch to interrupted task's stack
+    movs pc, r0
+
+    stmfd sp!, {r2}         @ push old task's pc
+    stmfd sp!, {r4-r12,lr}  @ push old task's lr,r12-r4
+    mov r4, r1              @ Special optimised code below
+    mov r5, r3
+    ldmfd r4!, {r0-r3}
+    stmfd sp!, {r0-r3}      @ push old task's r3-r0
+    stmfd sp!, {r5}         @ push old task's psr
+    mrs r4, spsr
+    stmfd sp!, {r4}         @ push old task's spsr
+
+    ldr r4, =rt_interrupt_from_thread
+    ldr r5, [r4]
+    str sp, [r5]            @ store sp in preempted tasks's TCB
+
+    ldr r6, =rt_interrupt_to_thread
+    ldr r6, [r6]
+    ldr sp, [r6]            @ get new task's stack pointer
+
+    ldmfd sp!, {r4}         @ pop new task's spsr
+    msr SPSR_cxsf, r4
+    ldmfd sp!, {r4}         @ pop new task's psr
+    msr CPSR_cxsf, r4
+
+    ldmfd sp!, {r0-r12,lr,pc}   @ pop new task's r0-r12,lr & pc
 
 /* each mode stack memory */
-UNDSTACK_START:	.word _undefined_stack_start + 128
-ABTSTACK_START:	.word _abort_stack_start + 128
-FIQSTACK_START:	.word _fiq_stack_start + 1024
-IRQSTACK_START:	.word _irq_stack_start + 1024
+UNDSTACK_START: .word _undefined_stack_start + 128
+ABTSTACK_START: .word _abort_stack_start + 128
+FIQSTACK_START: .word _fiq_stack_start + 1024
+IRQSTACK_START: .word _irq_stack_start + 1024
 SVCSTACK_START: .word _svc_stack_start + 4096
 
 stack_setup:
-	/* undefined instruction mode */
-	msr cpsr_c, #UNDEFMODE|NOINT
-	ldr sp, UNDSTACK_START
+    /* undefined instruction mode */
+    msr cpsr_c, #UNDEFMODE|NOINT
+    ldr sp, UNDSTACK_START
 
-	/* abort mode */
-	msr cpsr_c, #ABORTMODE|NOINT
-	ldr sp, ABTSTACK_START
+    /* abort mode */
+    msr cpsr_c, #ABORTMODE|NOINT
+    ldr sp, ABTSTACK_START
 
-	/* FIQ mode */
-	msr cpsr_c, #FIQMODE|NOINT
-	ldr sp, FIQSTACK_START
+    /* FIQ mode */
+    msr cpsr_c, #FIQMODE|NOINT
+    ldr sp, FIQSTACK_START
 
-	/* IRQ mode */
-	msr cpsr_c, #IRQMODE|NOINT
-	ldr sp, IRQSTACK_START
+    /* IRQ mode */
+    msr cpsr_c, #IRQMODE|NOINT
+    ldr sp, IRQSTACK_START
 
-	/* supervisor mode */
-	msr cpsr_c, #SVCMODE|NOINT
-	ldr sp, SVCSTACK_START
+    /* supervisor mode */
+    msr cpsr_c, #SVCMODE|NOINT
+    ldr sp, SVCSTACK_START
 
-	mov	pc,lr				@ The LR register may be not valid for the mode changes.
+    mov pc,lr               @ The LR register may be not valid for the mode changes.
 
 .globl led_on
 led_on:
-	ldr	r1,	=0x1d20014		@ r1<-PDATC
-	ldr	r0,	[r1]			@ r0<-[r1]
-	orr	r0,	r0, #0x0e		@ r0=r0 or 0x0e
-	str	r0,	[r1]			@ r0->[r1]
-	mov	pc, lr
+    ldr r1, =0x1d20014      @ r1<-PDATC
+    ldr r0, [r1]            @ r0<-[r1]
+    orr r0, r0, #0x0e       @ r0=r0 or 0x0e
+    str r0, [r1]            @ r0->[r1]
+    mov pc, lr
 
 .globl led_off
 led_off:
-	ldr	r1,	=0x1d20010		@ r1<-PCONC
-	ldr	r0,	=0x5f555555		@ r0<-0x5f555555
-	str	r0,	[r1]			@ r0->[r1]
+    ldr r1, =0x1d20010      @ r1<-PCONC
+    ldr r0, =0x5f555555     @ r0<-0x5f555555
+    str r0, [r1]            @ r0->[r1]
 
-	ldr	r1,	=0x1d20014		@ r1<-PDATC
-	ldr	r0,	=0x0			@ r0<-00
-	str	r0,	[r1]			@ r0->[r1]
+    ldr r1, =0x1d20014      @ r1<-PDATC
+    ldr r0, =0x0            @ r0<-00
+    str r0, [r1]            @ r0->[r1]
 
-	mov	pc, lr
+    mov pc, lr

+ 61 - 61
libcpu/arm/sep4020/context_rvds.S

@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2006-2018, RT-Thread Development Team
+; * Copyright (c) 2006-2022, RT-Thread Development Team
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -8,96 +8,96 @@
 ; * 2009-01-20     Bernard      first version
 ; */
 
-NOINT	EQU		0xc0	; disable interrupt in psr
+NOINT   EQU     0xc0    ; disable interrupt in psr
 
-	AREA |.text|, CODE, READONLY, ALIGN=2
-	ARM
-	REQUIRE8
-	PRESERVE8
+    AREA |.text|, CODE, READONLY, ALIGN=2
+    ARM
+    REQUIRE8
+    PRESERVE8
 
 ;/*
 ; * rt_base_t rt_hw_interrupt_disable();
 ; */
-rt_hw_interrupt_disable	PROC
-	EXPORT rt_hw_interrupt_disable
-	MRS r0, cpsr
-	ORR r1, r0, #NOINT
-	MSR cpsr_c, r1
-	BX	lr
-	ENDP
+rt_hw_interrupt_disable PROC
+    EXPORT rt_hw_interrupt_disable
+    MRS r0, cpsr
+    ORR r1, r0, #NOINT
+    MSR cpsr_c, r1
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_interrupt_enable(rt_base_t level);
 ; */
-rt_hw_interrupt_enable	PROC
-	EXPORT rt_hw_interrupt_enable
-	MSR cpsr_c, r0
-	BX	lr
-	ENDP
+rt_hw_interrupt_enable  PROC
+    EXPORT rt_hw_interrupt_enable
+    MSR cpsr_c, r0
+    BX  lr
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 ; * r0 --> from
 ; * r1 --> to
 ; */
-rt_hw_context_switch	PROC
-	EXPORT rt_hw_context_switch
-	STMFD	sp!, {lr}			; push pc (lr should be pushed in place of PC)
-	STMFD	sp!, {r0-r12, lr}	; push lr & register file
+rt_hw_context_switch    PROC
+    EXPORT rt_hw_context_switch
+    STMFD   sp!, {lr}           ; push pc (lr should be pushed in place of PC)
+    STMFD   sp!, {r0-r12, lr}   ; push lr & register file
 
-	MRS		r4, cpsr
-	STMFD	sp!, {r4}			; push cpsr
-	MRS		r4, spsr
-	STMFD	sp!, {r4}			; push spsr
+    MRS     r4, cpsr
+    STMFD   sp!, {r4}           ; push cpsr
+    MRS     r4, spsr
+    STMFD   sp!, {r4}           ; push spsr
 
-	STR	sp, [r0]				; store sp in preempted tasks TCB
-	LDR	sp, [r1]				; get new task stack pointer
+    STR sp, [r0]                ; store sp in preempted tasks TCB
+    LDR sp, [r1]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task spsr
-	MSR	spsr_cxsf, r4
-	LDMFD	sp!, {r4}			; pop new task cpsr
-	MSR	cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR cpsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}   ; pop new task r0-r12, lr & pc
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_to(rt_uint32 to);
 ; * r0 --> to
 ; */
-rt_hw_context_switch_to	PROC
-	EXPORT rt_hw_context_switch_to
-	LDR	sp, [r0]				; get new task stack pointer
+rt_hw_context_switch_to PROC
+    EXPORT rt_hw_context_switch_to
+    LDR sp, [r0]                ; get new task stack pointer
 
-	LDMFD	sp!, {r4}			; pop new task spsr
-	MSR	spsr_cxsf, r4
-	LDMFD	sp!, {r4}			; pop new task cpsr
-	MSR	cpsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task spsr
+    MSR spsr_cxsf, r4
+    LDMFD   sp!, {r4}           ; pop new task cpsr
+    MSR cpsr_cxsf, r4
 
-	LDMFD	sp!, {r0-r12, lr, pc}	; pop new task r0-r12, lr & pc
-	ENDP
+    LDMFD   sp!, {r0-r12, lr, pc}   ; pop new task r0-r12, lr & pc
+    ENDP
 
 ;/*
 ; * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 ; */
-	IMPORT rt_thread_switch_interrupt_flag
-	IMPORT rt_interrupt_from_thread
-	IMPORT rt_interrupt_to_thread
+    IMPORT rt_thread_switch_interrupt_flag
+    IMPORT rt_interrupt_from_thread
+    IMPORT rt_interrupt_to_thread
 
-rt_hw_context_switch_interrupt	PROC
-	EXPORT rt_hw_context_switch_interrupt
-	LDR r2, =rt_thread_switch_interrupt_flag
-	LDR r3, [r2]
-	CMP r3, #1
-	BEQ _reswitch
-	MOV r3, #1							; set rt_thread_switch_interrupt_flag to 1
-	STR r3, [r2]
-	LDR r2, =rt_interrupt_from_thread	; set rt_interrupt_from_thread
-	STR r0, [r2]
+rt_hw_context_switch_interrupt  PROC
+    EXPORT rt_hw_context_switch_interrupt
+    LDR r2, =rt_thread_switch_interrupt_flag
+    LDR r3, [r2]
+    CMP r3, #1
+    BEQ _reswitch
+    MOV r3, #1                          ; set rt_thread_switch_interrupt_flag to 1
+    STR r3, [r2]
+    LDR r2, =rt_interrupt_from_thread   ; set rt_interrupt_from_thread
+    STR r0, [r2]
 _reswitch
-	LDR r2, =rt_interrupt_to_thread		; set rt_interrupt_to_thread
-	STR r1, [r2]
-	BX	lr
-	ENDP
+    LDR r2, =rt_interrupt_to_thread     ; set rt_interrupt_to_thread
+    STR r1, [r2]
+    BX  lr
+    ENDP
 
-	END
+    END

+ 4 - 4
libcpu/arm/zynqmp-r5/context_gcc.S

@@ -1,11 +1,11 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
  * Change Logs:
  * Date           Author       Notes
- * 2020-03-19 	  WangHuachen  first version
+ * 2020-03-19     WangHuachen  first version
  */
 
 .section .text, "ax"
@@ -35,7 +35,7 @@ rt_hw_context_switch_to:
     ldr sp, [r0]            @ get new task stack pointer
 
 #if defined (__VFP_FP__) && !defined(__SOFTFP__)
-    ldmfd sp!, {r1}				/* Restore floating point registers */
+    ldmfd sp!, {r1}             /* Restore floating point registers */
     vmsr FPEXC, r1
     ldmfd sp!, {r1}
     vmsr FPSCR, r1
@@ -79,7 +79,7 @@ _ARM_MODE:
     ldr sp, [r1]            @ get new task stack pointer
 
 #if defined (__VFP_FP__) && !defined(__SOFTFP__)
-    ldmfd sp!, {r1}				/* Restore floating point registers */
+    ldmfd sp!, {r1}             /* Restore floating point registers */
     vmsr FPEXC, r1
     ldmfd sp!, {r1}
     vmsr FPSCR, r1

+ 37 - 37
libcpu/arm/zynqmp-r5/start_gcc.S

@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
  * Change Logs:
  * Date           Author       Notes
- * 2020-03-19 	  WangHuachen  first version
- * 2021-05-11 	  WangHuachen  Added call to Xil_InitializeExistingMPURegConfig to
+ * 2020-03-19     WangHuachen  first version
+ * 2021-05-11     WangHuachen  Added call to Xil_InitializeExistingMPURegConfig to
  *                             initialize the MPU configuration table with the MPU
  *                             configurations already set in Init_Mpu function.
  */
@@ -59,19 +59,19 @@ stack_top:
 _reset:
 
     /* Initialize processor registers to 0 */
-    mov	r0,#0
-    mov	r1,#0
-    mov	r2,#0
-    mov	r3,#0
-    mov	r4,#0
-    mov	r5,#0
-    mov	r6,#0
-    mov	r7,#0
-    mov	r8,#0
-    mov	r9,#0
-    mov	r10,#0
-    mov	r11,#0
-    mov	r12,#0
+    mov r0,#0
+    mov r1,#0
+    mov r2,#0
+    mov r3,#0
+    mov r4,#0
+    mov r5,#0
+    mov r6,#0
+    mov r7,#0
+    mov r8,#0
+    mov r9,#0
+    mov r10,#0
+    mov r11,#0
+    mov r12,#0
 
     /* set the cpu to SVC32 mode and disable interrupt */
     cpsid  if, #Mode_SVC
@@ -83,18 +83,18 @@ _reset:
     * Enable access to VFP by enabling access to Coprocessors 10 and 11.
     * Enables Full Access i.e. in both privileged and non privileged modes
     */
-    mrc     p15, 0, r0, c1, c0, 2      	/* Read Coprocessor Access Control Register (CPACR) */
-    orr     r0, r0, #(0xF << 20)       	/* Enable access to CP 10 & 11 */
-    mcr     p15, 0, r0, c1, c0, 2      	/* Write Coprocessor Access Control Register (CPACR) */
+    mrc     p15, 0, r0, c1, c0, 2       /* Read Coprocessor Access Control Register (CPACR) */
+    orr     r0, r0, #(0xF << 20)        /* Enable access to CP 10 & 11 */
+    mcr     p15, 0, r0, c1, c0, 2       /* Write Coprocessor Access Control Register (CPACR) */
     isb
 
     /* enable fpu access  */
     vmrs    r3, FPEXC
-    orr	r1, r3, #(1<<30)
+    orr r1, r3, #(1<<30)
     vmsr    FPEXC, r1
 
     /* clear the floating point register*/
-    mov	    r1,#0
+    mov     r1,#0
     vmov    d0,r1,r1
     vmov    d1,r1,r1
     vmov    d2,r1,r1
@@ -114,16 +114,16 @@ _reset:
 
 #ifdef __SOFTFP__
 /* Disable the FPU if SOFTFP is defined*/
-    vmsr	FPEXC,r3
+    vmsr    FPEXC,r3
 #endif
 
     /* Disable MPU and caches */
-    mrc     p15, 0, r0, c1, c0, 0       	/* Read CP15 Control Register*/
-    bic     r0, r0, #0x05               	/* Disable MPU (M bit) and data cache (C bit) */
-    bic     r0, r0, #0x1000             	/* Disable instruction cache (I bit) */
-    dsb                                 	/* Ensure all previous loads/stores have completed */
-    mcr     p15, 0, r0, c1, c0, 0       	/* Write CP15 Control Register */
-    isb                                 	/* Ensure subsequent insts execute wrt new MPU settings */
+    mrc     p15, 0, r0, c1, c0, 0           /* Read CP15 Control Register*/
+    bic     r0, r0, #0x05                   /* Disable MPU (M bit) and data cache (C bit) */
+    bic     r0, r0, #0x1000                 /* Disable instruction cache (I bit) */
+    dsb                                     /* Ensure all previous loads/stores have completed */
+    mcr     p15, 0, r0, c1, c0, 0           /* Write CP15 Control Register */
+    isb                                     /* Ensure subsequent insts execute wrt new MPU settings */
 
     /* Disable Branch prediction, TCM ECC checks */
     mrc     p15, 0, r0, c1, c0, 1           /* Read ACTLR */
@@ -173,14 +173,14 @@ _reset:
     nop
 
 init:
-    bl 	Init_MPU                            /* Initialize MPU */
+    bl  Init_MPU                            /* Initialize MPU */
 
     /* Enable Branch prediction */
     mrc     p15, 0, r0, c1, c0, 1       /* Read ACTLR*/
     bic     r0, r0, #(0x1 << 17)        /* Clear RSDIS bit 17 to enable return stack*/
     bic     r0, r0, #(0x1 << 16)        /* Clear BP bit 15 and BP bit 16:*/
     bic     r0, r0, #(0x1 << 15)        /* Normal operation, BP is taken from the global history table.*/
-    orr     r0, r0, #(0x1 << 14)	    /* Disable DBWR for errata 780125 */
+    orr     r0, r0, #(0x1 << 14)        /* Disable DBWR for errata 780125 */
     mcr     p15, 0, r0, c1, c0, 1       /* Write ACTLR*/
 
     /* Enable icahce and dcache */
@@ -189,7 +189,7 @@ init:
     orr     r1,r1,r0
     dsb
     mcr     p15,0,r1,c1,c0,0            /* Enable cache  */
-    isb                                 /* isb	flush prefetch buffer */
+    isb                                 /* isb  flush prefetch buffer */
 
     /* Set vector table in TCM/LOVEC */
     mrc     p15, 0, r0, c1, c0, 0
@@ -237,7 +237,7 @@ ctor_loop:
     b       ctor_loop
 ctor_end:
 
-    bl 	Xil_InitializeExistingMPURegConfig	/* Initialize MPU config */
+    bl  Xil_InitializeExistingMPURegConfig  /* Initialize MPU config */
     /* start RT-Thread Kernel       */
     ldr     pc, _entry
 
@@ -393,35 +393,35 @@ rt_hw_context_switch_interrupt_do:
 .endm
 
     .align  5
-    .globl	vector_swi
+    .globl  vector_swi
 vector_swi:
     push_svc_reg
     bl      rt_hw_trap_swi
     b       .
 
     .align  5
-    .globl	vector_undef
+    .globl  vector_undef
 vector_undef:
     push_svc_reg
     bl      rt_hw_trap_undef
     b       .
 
     .align  5
-    .globl	vector_pabt
+    .globl  vector_pabt
 vector_pabt:
     push_svc_reg
     bl      rt_hw_trap_pabt
     b       .
 
     .align  5
-    .globl	vector_dabt
+    .globl  vector_dabt
 vector_dabt:
     push_svc_reg
     bl      rt_hw_trap_dabt
     b       .
 
     .align  5
-    .globl	vector_resv
+    .globl  vector_resv
 vector_resv:
     push_svc_reg
     bl      rt_hw_trap_resv

+ 2 - 2
libcpu/arm/zynqmp-r5/vector_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -48,4 +48,4 @@ _vector_irq:
 _vector_fiq:
     .word vector_fiq
 
-.balignl 	16,0xdeadbeef
+.balignl    16,0xdeadbeef

+ 36 - 36
libcpu/avr32/uc3/context_gcc.S

@@ -12,8 +12,8 @@
  * 2010-03-27     Kyle         First version
  */
 
-#define AVR32_SR			0
-#define AVR32_SR_GM_OFFSET	16
+#define AVR32_SR            0
+#define AVR32_SR_GM_OFFSET  16
 
 .text
 
@@ -23,8 +23,8 @@
 .globl rt_hw_interrupt_disable
 .type rt_hw_interrupt_disable, %function
 rt_hw_interrupt_disable:
-	ssrf	AVR32_SR_GM_OFFSET
-	mov		pc, lr
+    ssrf    AVR32_SR_GM_OFFSET
+    mov     pc, lr
 
 /*
  * void rt_hw_interrupt_enable(rt_base_t level)
@@ -32,8 +32,8 @@ rt_hw_interrupt_disable:
 .globl rt_hw_interrupt_enable
 .type rt_hw_interrupt_enable, %function
 rt_hw_interrupt_enable:
-	csrf	AVR32_SR_GM_OFFSET
-	mov		pc, lr
+    csrf    AVR32_SR_GM_OFFSET
+    mov     pc, lr
 
 /*
  * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to)/*
@@ -43,22 +43,22 @@ rt_hw_interrupt_enable:
 .globl rt_hw_context_switch
 .type rt_hw_context_switch, %function
 rt_hw_context_switch:
-	ssrf	AVR32_SR_GM_OFFSET	/* Disable global interrupt */
-	stm		--sp, r8-r12, lr	/* Push R8-R12, LR */
-	st.w	--sp, lr			/* Push LR (instead of PC) */
-	mfsr	r8, AVR32_SR		/* Read Status Register */
-	cbr		r8, AVR32_SR_GM_OFFSET	/* Clear GM bit */
-	st.w	--sp, r8			/* Push SR */
-	stm		--sp, r0-r7			/* Push R0-R7 */
-								/* Stack layout: R8-R12, LR, PC, SR, R0-R7 */
+    ssrf    AVR32_SR_GM_OFFSET  /* Disable global interrupt */
+    stm     --sp, r8-r12, lr    /* Push R8-R12, LR */
+    st.w    --sp, lr            /* Push LR (instead of PC) */
+    mfsr    r8, AVR32_SR        /* Read Status Register */
+    cbr     r8, AVR32_SR_GM_OFFSET  /* Clear GM bit */
+    st.w    --sp, r8            /* Push SR */
+    stm     --sp, r0-r7         /* Push R0-R7 */
+                                /* Stack layout: R8-R12, LR, PC, SR, R0-R7 */
 
-	st.w	r12[0], sp			/* Store SP in preempted tasks TCB */
-	ld.w	sp, r11[0]			/* Get new task stack pointer */
+    st.w    r12[0], sp          /* Store SP in preempted tasks TCB */
+    ld.w    sp, r11[0]          /* Get new task stack pointer */
 
-	ldm		sp++, r0-r7			/* pop R0-R7 */
-	ld.w	r8, sp++			/* pop SR */
-	mtsr	AVR32_SR, r8		/* Restore SR */
-	ldm		sp++, r8-r12, lr, pc/* Pop R8-R12, LR, PC and resume to thread */
+    ldm     sp++, r0-r7         /* pop R0-R7 */
+    ld.w    r8, sp++            /* pop SR */
+    mtsr    AVR32_SR, r8        /* Restore SR */
+    ldm     sp++, r8-r12, lr, pc/* Pop R8-R12, LR, PC and resume to thread */
 
 /*
  * void rt_hw_context_switch_to(rt_uint32 to)/*
@@ -67,12 +67,12 @@ rt_hw_context_switch:
 .globl rt_hw_context_switch_to
 .type rt_hw_context_switch_to, %function
 rt_hw_context_switch_to:
-	ld.w	sp, r12[0]			/* Get new task stack pointer */
+    ld.w    sp, r12[0]          /* Get new task stack pointer */
 
-	ldm		sp++, r0-r7			/* pop R0-R7 */
-	ld.w	r8, sp++			/* pop SR */
-	mtsr	AVR32_SR, r8		/* Restore SR */
-	ldm		sp++, r8-r12, lr, pc/* Pop R8-R12, LR, PC and resume execution */
+    ldm     sp++, r0-r7         /* pop R0-R7 */
+    ld.w    r8, sp++            /* pop SR */
+    mtsr    AVR32_SR, r8        /* Restore SR */
+    ldm     sp++, r8-r12, lr, pc/* Pop R8-R12, LR, PC and resume execution */
 
 /*
  * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to)/*
@@ -83,15 +83,15 @@ rt_hw_context_switch_to:
 .globl rt_hw_context_switch_interrupt
 .type rt_hw_context_switch_interrupt, %function
 rt_hw_context_switch_interrupt:
-	lda.w	r8, rt_thread_switch_interrupt_flag
-	ld.w	r9, r8[0]
-	cp.w	r9, 1
-	breq	_reswitch
-	mov		r9, 1
-	st.w	r8[0], r9
-	lda.w	r8, rt_interrupt_from_thread
-	st.w	r8[0], r12
+    lda.w   r8, rt_thread_switch_interrupt_flag
+    ld.w    r9, r8[0]
+    cp.w    r9, 1
+    breq    _reswitch
+    mov     r9, 1
+    st.w    r8[0], r9
+    lda.w   r8, rt_interrupt_from_thread
+    st.w    r8[0], r12
 _reswitch:
-	lda.w	r8, rt_interrupt_to_thread
-	st.w	r8[0], r11
-	mov		pc, lr
+    lda.w   r8, rt_interrupt_to_thread
+    st.w    r8[0], r11
+    mov     pc, lr

+ 43 - 43
libcpu/ia32/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -19,63 +19,63 @@
  */
 .globl rt_hw_context_switch
 rt_hw_context_switch:
-	pushfl					/*pushed eflags*/
+    pushfl                  /*pushed eflags*/
 /*
- *	add by ssslady@gmail.com 2009-10-14
+ *  add by ssslady@gmail.com 2009-10-14
  *      When we return again the esp should no be change.
- * 	The old code change the esp to esp-4 :-(.
- *	A protection fault maybe occure for img created by some compiler,eg.gcc in the fedor-11
+ *  The old code change the esp to esp-4 :-(.
+ *  A protection fault maybe occure for img created by some compiler,eg.gcc in the fedor-11
  *      -------------------------------------------------------------------------
- *	entry			old code			new code
- *	EIP	->return esp	EIP				FLAGS ->return esp
- *	...			FLAGS    ->retern esp		CS
- *				CS				EIP
- *				EIP
+ *  entry           old code            new code
+ *  EIP ->return esp    EIP             FLAGS ->return esp
+ *  ...         FLAGS    ->retern esp       CS
+ *              CS              EIP
+ *              EIP
  */
-	popl %eax	/*get flags*/
-	popl %ebx	/*get eip*/
-	pushl %eax	/*push flags*/
-	push %cs	/*push cs*/
-	pushl %ebx	/*push eip*/
-	
+    popl %eax   /*get flags*/
+    popl %ebx   /*get eip*/
+    pushl %eax  /*push flags*/
+    push %cs    /*push cs*/
+    pushl %ebx  /*push eip*/
+
 /*-------------------------------------------------------------------
  */
 
-	/*push %cs*/				/*push cs register*/
-	/*pushl 0x8(%esp)*/			/*pushed eip register*/
+    /*push %cs*/                /*push cs register*/
+    /*pushl 0x8(%esp)*/         /*pushed eip register*/
+
+    pushl $0                /*fill irqno*/
+    push %ds                /*push ds register*/
+    push %es                /*push es register*/
+    pushal                  /*push eax,ecx,edx,ebx,esp,ebp,esp,edi registers*/
 
-	pushl $0				/*fill irqno*/
-	push %ds				/*push ds register*/
-	push %es				/*push es register*/
-	pushal					/*push eax,ecx,edx,ebx,esp,ebp,esp,edi registers*/
-	
-	/*movl 0x40(%esp), %eax*/	/*to thread TCB*/
-	/*movl 0x3c(%esp), %ebx*/	/*from thread TCB*/
-	movl 0x3c(%esp), %eax	/*to thread TCB*/
-	movl 0x38(%esp), %ebx	/*from thread TCB*/
+    /*movl 0x40(%esp), %eax*/   /*to thread TCB*/
+    /*movl 0x3c(%esp), %ebx*/   /*from thread TCB*/
+    movl 0x3c(%esp), %eax   /*to thread TCB*/
+    movl 0x38(%esp), %ebx   /*from thread TCB*/
 
-	movl %esp, (%ebx)		/*store esp in preempted tasks TCB*/
-	movl (%eax), %esp		/*get new task stack pointer*/
+    movl %esp, (%ebx)       /*store esp in preempted tasks TCB*/
+    movl (%eax), %esp       /*get new task stack pointer*/
 
-	popal					/*restore new task TCB*/
-	pop %es
-	pop %ds
-	add $4,%esp				/*skip irqno*/
-	iret
+    popal                   /*restore new task TCB*/
+    pop %es
+    pop %ds
+    add $4,%esp             /*skip irqno*/
+    iret
 
 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
  */
 .globl rt_hw_context_switch_to
 rt_hw_context_switch_to:
-	push %ebp
-	movl %esp, %ebp
+    push %ebp
+    movl %esp, %ebp
 
-	movl 0x8(%ebp), %eax	/* to thread TCB */
-	movl (%eax), %esp		/* get new task stack pointer */
+    movl 0x8(%ebp), %eax    /* to thread TCB */
+    movl (%eax), %esp       /* get new task stack pointer */
 
-	popal					/* restore new task TCB*/
-	pop %es
-	pop %ds
-	add $4, %esp			/* skip irqno */
-	iret
+    popal                   /* restore new task TCB*/
+    pop %es
+    pop %ds
+    add $4, %esp            /* skip irqno */
+    iret

+ 61 - 61
libcpu/ia32/hdisr_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -14,23 +14,23 @@
 /*@{*/
 
 #define ENTRY(proc)\
-	.align 2;\
-	.globl proc;\
-	.type proc,@function;\
-	proc:
+    .align 2;\
+    .globl proc;\
+    .type proc,@function;\
+    proc:
 #define HDINTERRUPTFNC(name,num) \
-	ENTRY(name)\
-	pushl $(num);\
-	jmp _hdinterrupts;\
-	.data;\
-	.long name;\
-	.text
+    ENTRY(name)\
+    pushl $(num);\
+    jmp _hdinterrupts;\
+    .data;\
+    .long name;\
+    .text
 
 .globl hdinterrupt_func
-	.data
-	.align 4
-	.type hdinterrupt_func,@object
-	hdinterrupt_func :
+    .data
+    .align 4
+    .type hdinterrupt_func,@object
+    hdinterrupt_func :
 .text
 
 /* the external device interrupts */
@@ -62,57 +62,57 @@ HDINTERRUPTFNC(irq15, 15)
 .globl rt_interrupt_to_thread
 
 _hdinterrupts:
-	push %ds
-	push %es
-	pushal
-	movw $0x10, %ax
-	movw %ax, %ds
-	movw %ax, %es
-	pushl %esp
-
-	call rt_interrupt_enter
-
-	movl %esp, %eax	      /* copy esp to eax */
-	addl $0x2c, %eax      /* move to vector address */
-	movl (%eax), %eax     /* vector(eax) = *eax */
-
-	pushl %eax            /* push argument : int vector */
-	call rt_hw_isr
-	add $4, %esp          /* restore argument */
-
-	call rt_interrupt_leave
-
-	/* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
-	movl $rt_thread_switch_interrupt_flag, %eax
-	movl (%eax), %ebx
-	cmp $0x1, %ebx
-	jz _interrupt_thread_switch
-
-	popl %esp
-	popal
-	pop %es
-	pop %ds
-	add $4,%esp
-	iret
+    push %ds
+    push %es
+    pushal
+    movw $0x10, %ax
+    movw %ax, %ds
+    movw %ax, %es
+    pushl %esp
+
+    call rt_interrupt_enter
+
+    movl %esp, %eax       /* copy esp to eax */
+    addl $0x2c, %eax      /* move to vector address */
+    movl (%eax), %eax     /* vector(eax) = *eax */
+
+    pushl %eax            /* push argument : int vector */
+    call rt_hw_isr
+    add $4, %esp          /* restore argument */
+
+    call rt_interrupt_leave
+
+    /* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
+    movl $rt_thread_switch_interrupt_flag, %eax
+    movl (%eax), %ebx
+    cmp $0x1, %ebx
+    jz _interrupt_thread_switch
+
+    popl %esp
+    popal
+    pop %es
+    pop %ds
+    add $4,%esp
+    iret
 
 _interrupt_thread_switch:
-	popl %esp
+    popl %esp
 
-	movl $0x0, %ebx
-	movl %ebx, (%eax)
+    movl $0x0, %ebx
+    movl %ebx, (%eax)
 
-	movl $rt_interrupt_from_thread, %eax
-	movl (%eax), %ebx
-	movl %esp, (%ebx)
+    movl $rt_interrupt_from_thread, %eax
+    movl (%eax), %ebx
+    movl %esp, (%ebx)
 
-	movl $rt_interrupt_to_thread, %ecx
-	movl (%ecx), %edx
-	movl (%edx), %esp
+    movl $rt_interrupt_to_thread, %ecx
+    movl (%ecx), %edx
+    movl (%edx), %esp
 
-	popal
-	pop %es
-	pop %ds
-	add $4,%esp
-	iret
+    popal
+    pop %es
+    pop %ds
+    add $4,%esp
+    iret
 
 /*@}*/

+ 44 - 44
libcpu/ia32/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -10,10 +10,10 @@
  */
 
 /* the magic number for the multiboot header.  */
-#define MULTIBOOT_HEADER_MAGIC		0x1BADB002
+#define MULTIBOOT_HEADER_MAGIC      0x1BADB002
 
 /* the flags for the multiboot header.  */
-#define MULTIBOOT_HEADER_FLAGS		0x00000003
+#define MULTIBOOT_HEADER_FLAGS      0x00000003
 
 #define CONFIG_STACKSIZE            8192
 
@@ -27,67 +27,67 @@
 /* the system entry */
 .globl _start
 _start:
-	jmp	multiboot_entry
+    jmp multiboot_entry
 
-	/* Align 32 bits boundary.  */
-	.align	4
+    /* Align 32 bits boundary.  */
+    .align  4
 
-	/* multiboot header.  */
+    /* multiboot header.  */
 multiboot_header:
-	/* magic */
-	.long	MULTIBOOT_HEADER_MAGIC
-	/* flags */
-	.long	MULTIBOOT_HEADER_FLAGS
-	/* checksum */
-	.long	-(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
+    /* magic */
+    .long   MULTIBOOT_HEADER_MAGIC
+    /* flags */
+    .long   MULTIBOOT_HEADER_FLAGS
+    /* checksum */
+    .long   -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
 
 multiboot_entry:
-	movl	$(_end + 0x1000),%esp
+    movl    $(_end + 0x1000),%esp
 
-	/* reset eflags.  */
-	pushl	$0
-	popf
+    /* reset eflags.  */
+    pushl   $0
+    popf
 
-	/* rebuild globe describe table */
-	lgdt	__gdtdesc
+    /* rebuild globe describe table */
+    lgdt    __gdtdesc
 
-	movl	$0x10,%eax
-	movw	%ax,%ds
-	movw	%ax,%es
-	movw	%ax,%ss
-	ljmp	$0x08, $relocated
+    movl    $0x10,%eax
+    movw    %ax,%ds
+    movw    %ax,%es
+    movw    %ax,%ss
+    ljmp    $0x08, $relocated
 
 relocated:
-	/* push the pointer to the multiboot information structure.  */
-	pushl	%ebx
+    /* push the pointer to the multiboot information structure.  */
+    pushl   %ebx
 
-	/* push the magic value.  */
-	pushl	%eax
+    /* push the magic value.  */
+    pushl   %eax
 
-	call	rtthread_startup
+    call    rtthread_startup
 
-	/* never get here */
+    /* never get here */
 spin:
-	hlt
-	jmp	spin
+    hlt
+    jmp spin
 
 .data
-.p2align	2
+.p2align    2
 __gdt:
-	.word	0,0,0,0
+    .word   0,0,0,0
 
-	.word	0x07FF		/* 8Mb - limit=2047 */
-	.word	0x0000
-	.word	0x9A00		/* code read/exec */
-	.word	0x00C0
+    .word   0x07FF      /* 8Mb - limit=2047 */
+    .word   0x0000
+    .word   0x9A00      /* code read/exec */
+    .word   0x00C0
 
-	.word	0x07FF		/* 8Mb - limit=2047 */
-	.word	0x0000
-	.word	0x9200		/* data read/write */
-	.word	0x00C0
+    .word   0x07FF      /* 8Mb - limit=2047 */
+    .word   0x0000
+    .word   0x9200      /* data read/write */
+    .word   0x00C0
 
 __gdtdesc:
-	.word	0x17
-	.long	__gdt
+    .word   0x17
+    .long   __gdt
 
 /*@}*/

+ 45 - 45
libcpu/ia32/trapisr_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -14,33 +14,33 @@
 /*@{*/
 
 #define ENTRY(proc)\
-	.align 2;\
-	.globl proc;\
-	.type proc,@function;\
-	proc:
+    .align 2;\
+    .globl proc;\
+    .type proc,@function;\
+    proc:
 
 #define TRAPFNC(name,num)\
-	ENTRY(name)\
-	pushl $(num);\
-	jmp _traps;\
-	.data;\
-	.long name;\
-	.text
+    ENTRY(name)\
+    pushl $(num);\
+    jmp _traps;\
+    .data;\
+    .long name;\
+    .text
 
 #define TRAPFNC_NOEC(name,num)\
-	ENTRY(name)\
-	pushl $0;\
-	pushl $(num);\
-	jmp _traps;\
-	.data;\
-	.long name;\
-	.text
+    ENTRY(name)\
+    pushl $0;\
+    pushl $(num);\
+    jmp _traps;\
+    .data;\
+    .long name;\
+    .text
 
 .globl trap_func
-	.data
-	.align 4
-	.type trap_func,@object
-	trap_func :
+    .data
+    .align 4
+    .type trap_func,@object
+    trap_func :
 .text
 
 /* CPU traps */
@@ -65,32 +65,32 @@ TRAPFNC (Xalign,  15)
 TRAPFNC (Xdefault, 500)
 
 .p2align 4,0x90
-.globl	_traps
-.type	_traps,@function
+.globl  _traps
+.type   _traps,@function
 .globl rt_interrupt_enter
 .globl rt_interrupt_leave
 
 _traps:
-	push %ds
-	push %es
-	pushal
-	movw $0x10,%ax
-	movw %ax,%ds
-	movw %ax,%es
-	pushl %esp
-	call rt_interrupt_enter
-	movl %esp, %eax		
-	addl $0x2c,%eax		/*get trapno*/
-	movl (%eax),%eax
-	pushl %eax			/*push trapno*/
-	call rt_hw_trap_irq
-	addl $4,%esp
-	call rt_interrupt_leave
-	popl %esp
-	popal
-	pop %es
-	pop %ds
-	add $8,%esp
-	iret
+    push %ds
+    push %es
+    pushal
+    movw $0x10,%ax
+    movw %ax,%ds
+    movw %ax,%es
+    pushl %esp
+    call rt_interrupt_enter
+    movl %esp, %eax
+    addl $0x2c,%eax     /*get trapno*/
+    movl (%eax),%eax
+    pushl %eax          /*push trapno*/
+    call rt_hw_trap_irq
+    addl $4,%esp
+    call rt_interrupt_leave
+    popl %esp
+    popal
+    pop %es
+    pop %ds
+    add $8,%esp
+    iret
 
 /*@}*/

+ 9 - 9
libcpu/m16c/m16c62p/context_gcc.S

@@ -11,32 +11,32 @@
  * Date           Author       Notes
  * 2010-04-09     fify         the first version
  * 2010-04-19     fify         rewrite rt_hw_interrupt_disable/enable fuction
- * 2010-04-20     fify         move peripheral ISR to bsp/interrupts.s34 
+ * 2010-04-20     fify         move peripheral ISR to bsp/interrupts.s34
  */
 
     .section .text
-  
+
     .globl _rt_interrupt_from_thread
     .globl _rt_interrupt_to_thread
-    .global	_os_context_switch
-    .type	_os_context_switch, @function
+    .global _os_context_switch
+    .type   _os_context_switch, @function
 _os_context_switch:
     PUSHM   R0,R1,R2,R3,A0,A1,SB,FB
-    
+
     MOV.W   _rt_interrupt_from_thread, A0
     STC     ISP, [A0]
     MOV.W   _rt_interrupt_to_thread, A0
     LDC     [A0], ISP
 
     POPM    R0,R1,R2,R3,A0,A1,SB,FB             ; Restore registers from the new task's stack
-    REIT                                        ; Return from interrup    
-    
+    REIT                                        ; Return from interrup
+
 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
  * this fucntion is used to perform the first thread switch
  */
-    .global	_rt_hw_context_switch_to
-    .type	_rt_hw_context_switch_to, @function
+    .global _rt_hw_context_switch_to
+    .type   _rt_hw_context_switch_to, @function
 _rt_hw_context_switch_to:
     ENTER   #0x0
     MOV.W   0x5[FB], A0

+ 5 - 5
libcpu/m16c/m16c62p/context_iar.S

@@ -11,7 +11,7 @@
  * Date           Author       Notes
  * 2010-04-09     fify         the first version
  * 2010-04-19     fify         rewrite rt_hw_interrupt_disable/enable fuction
- * 2010-04-20     fify         move peripheral ISR to bsp/interrupts.s34 
+ * 2010-04-20     fify         move peripheral ISR to bsp/interrupts.s34
  *
  * For       : Renesas M16C
  * Toolchain : IAR's EW for M16C v3.401
@@ -39,19 +39,19 @@ rt_hw_interrupt_disable:
 rt_hw_interrupt_enable:
     LDC     R0, FLG    ;fify 20100419
     RTS
-    
+
     .EVEN
 os_context_switch:
     PUSHM   R0,R1,R2,R3,A0,A1,SB,FB
-    
+
     MOV.W   rt_interrupt_from_thread, A0
     STC     ISP, [A0]
     MOV.W   rt_interrupt_to_thread, A0
     LDC     [A0], ISP
 
     POPM    R0,R1,R2,R3,A0,A1,SB,FB             ; Restore registers from the new task's stack
-    REIT                                        ; Return from interrup    
-    
+    REIT                                        ; Return from interrup
+
 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
  * r0 --> to

+ 5 - 5
libcpu/m16c/m16c62p/context_iar.asm

@@ -11,7 +11,7 @@
  * Date           Author       Notes
  * 2010-04-09     fify         the first version
  * 2010-04-19     fify         rewrite rt_hw_interrupt_disable/enable fuction
- * 2010-04-20     fify         move peripheral ISR to bsp/interrupts.s34 
+ * 2010-04-20     fify         move peripheral ISR to bsp/interrupts.s34
  *
  * For       : Renesas M16C
  * Toolchain : IAR's EW for M16C v3.401
@@ -39,19 +39,19 @@ rt_hw_interrupt_disable:
 rt_hw_interrupt_enable:
     LDC     R0, FLG    ;fify 20100419
     RTS
-    
+
     .EVEN
 os_context_switch:
     PUSHM   R0,R1,R2,R3,A0,A1,SB,FB
-    
+
     MOV.W   rt_interrupt_from_thread, A0
     STC     ISP, [A0]
     MOV.W   rt_interrupt_to_thread, A0
     LDC     [A0], ISP
 
     POPM    R0,R1,R2,R3,A0,A1,SB,FB             ; Restore registers from the new task's stack
-    REIT                                        ; Return from interrup    
-    
+    REIT                                        ; Return from interrup
+
 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
  * r0 --> to

+ 2 - 2
libcpu/mips/common/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2020, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -84,7 +84,7 @@ mips_irq_handle:
     jal     rt_interrupt_enter
     nop
     /* Get Old SP from k0 as paremeter in a0 */
-    move	a0, k0
+    move    a0, k0
     jal     rt_general_exc_dispatch
     nop
     jal     rt_interrupt_leave

+ 16 - 16
libcpu/mips/common/entry_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2019, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -28,37 +28,37 @@ _rtthread_entry:
     .globl  _start
 _start:
 #endif
-    PTR_LA	ra, _rtthread_entry
+    PTR_LA  ra, _rtthread_entry
 
     /* disable interrupt */
-    MTC0	zero, CP0_CAUSE
-    MTC0	zero, CP0_STATUS	# Set CPU to disable interrupt.
+    MTC0    zero, CP0_CAUSE
+    MTC0    zero, CP0_STATUS    # Set CPU to disable interrupt.
     ehb
 
 #ifdef ARCH_MIPS64
-    dli		t0, ST0_KX
-    MTC0	t0, CP0_STATUS
+    dli     t0, ST0_KX
+    MTC0    t0, CP0_STATUS
 #endif
 
     /* setup stack pointer */
-    PTR_LA	sp, _system_stack
-    PTR_LA	gp, _gp
+    PTR_LA  sp, _system_stack
+    PTR_LA  gp, _gp
 
-    bal	rt_cpu_early_init
+    bal rt_cpu_early_init
     nop
 
     /* clear bss */
-    PTR_LA	t0, __bss_start
-    PTR_LA	t1, __bss_end
+    PTR_LA  t0, __bss_start
+    PTR_LA  t1, __bss_end
 
 _clr_bss_loop:
-    sw	zero, 0(t0)
-    bne	t1, t0, _clr_bss_loop
-    addu	t0, 4
+    sw  zero, 0(t0)
+    bne t1, t0, _clr_bss_loop
+    addu    t0, 4
     /* jump to RT-Thread RTOS */
-    jal	rtthread_startup
+    jal rtthread_startup
     nop
 
     /* restart, never die */
-    j	_start
+    j   _start
     nop

+ 17 - 17
libcpu/mips/common/exception_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2020, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -21,50 +21,50 @@
 
     /* 0x0 - TLB refill handler */
     .global tlb_refill_exception
-    .type	tlb_refill_exception,@function
+    .type   tlb_refill_exception,@function
 ebase_start:
 tlb_refill_exception:
-    b	_general_exception_handler
+    b   _general_exception_handler
     nop
 
     /* 0x080 - XTLB refill handler */
     .org ebase_start + 0x080
-    b	_general_exception_handler
+    b   _general_exception_handler
     nop
 
     /* 0x100 - Cache error handler */
     .org ebase_start + 0x100
-    j	cache_error_handler
+    j   cache_error_handler
     nop
 
     /* 0x180 - Exception/Interrupt handler */
     .global general_exception
-    .type	general_exception,@function
+    .type   general_exception,@function
     .org ebase_start + 0x180
 general_exception:
-    b	_general_exception_handler
+    b   _general_exception_handler
     nop
 
     /* 0x200 - Special Exception Interrupt handler (when IV is set in CP0_CAUSE) */
     .global irq_exception
-    .type	irq_exception,@function
+    .type   irq_exception,@function
     .org ebase_start + 0x200
 irq_exception:
-    b	_general_exception_handler
+    b   _general_exception_handler
     nop
 
     /* general exception handler */
 _general_exception_handler:
-    .set	noreorder
-    PTR_LA	k0, mips_irq_handle
-    jr	k0
+    .set    noreorder
+    PTR_LA  k0, mips_irq_handle
+    jr  k0
     nop
-    .set	reorder
+    .set    reorder
 
     /* interrupt handler */
 _irq_handler:
-    .set	noreorder
-    PTR_LA	k0, mips_irq_handle
-    jr	k0
+    .set    noreorder
+    PTR_LA  k0, mips_irq_handle
+    jr  k0
     nop
-    .set	reorder
+    .set    reorder

+ 86 - 86
libcpu/mips/gs232/cache_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2019, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -19,7 +19,7 @@
 #include <mips.h>
 #include "cache.h"
 
-    .ent	cache_init
+    .ent    cache_init
     .global cache_init
     .set noreorder
 cache_init:
@@ -68,67 +68,67 @@ cache_i4way:
         sllv    t6, t4, t6
         sllv    t5, t4, t5
 #if 0
-    la	t0, memvar
-    sw	t7, 0x0(t0) #ways
-    sw	t5, 0x4(t0) #icache size
-    sw	t6, 0x8(t0) #dcache size
+    la  t0, memvar
+    sw  t7, 0x0(t0) #ways
+    sw  t5, 0x4(t0) #icache size
+    sw  t6, 0x8(t0) #dcache size
 #endif
 ####part 3####
-    .set	mips3
-    lui	a0, 0x8000
-    addu	a1, $0, t5
-    addu	a2, $0, t6
+    .set    mips3
+    lui a0, 0x8000
+    addu    a1, $0, t5
+    addu    a2, $0, t6
 cache_init_d2way:
 #a0=0x80000000, a1=icache_size, a2=dcache_size
 #a3, v0 and v1 used as local registers
-    mtc0	$0, CP0_TAGHI
-    addu	v0, $0, a0
-    addu	v1, a0, a2
-1:	slt	a3, v0, v1
-    beq	a3, $0, 1f
+    mtc0    $0, CP0_TAGHI
+    addu    v0, $0, a0
+    addu    v1, a0, a2
+1:  slt a3, v0, v1
+    beq a3, $0, 1f
     nop
-    mtc0	$0, CP0_TAGLO
-    beq	t7, 1, 4f
-    cache	Index_Store_Tag_D, 0x0(v0)	# 1 way
-    beq	t7, 2 ,4f
-    cache	Index_Store_Tag_D, 0x1(v0)	# 2 way
-    cache	Index_Store_Tag_D, 0x2(v0)	# 4 way
-    cache	Index_Store_Tag_D, 0x3(v0)
-4:	beq	$0, $0, 1b
-    addiu	v0, v0, 0x20
+    mtc0    $0, CP0_TAGLO
+    beq t7, 1, 4f
+    cache   Index_Store_Tag_D, 0x0(v0)  # 1 way
+    beq t7, 2 ,4f
+    cache   Index_Store_Tag_D, 0x1(v0)  # 2 way
+    cache   Index_Store_Tag_D, 0x2(v0)  # 4 way
+    cache   Index_Store_Tag_D, 0x3(v0)
+4:  beq $0, $0, 1b
+    addiu   v0, v0, 0x20
 1:
 cache_flush_i2way:
-    addu	v0, $0, a0
-    addu	v1, a0, a1
-1:	slt	a3, v0, v1
-    beq	a3, $0, 1f
+    addu    v0, $0, a0
+    addu    v1, a0, a1
+1:  slt a3, v0, v1
+    beq a3, $0, 1f
     nop
-    beq	t3, 1, 4f
-    cache	Index_Invalidate_I, 0x0(v0)	# 1 way
-    beq	t3, 2, 4f
-    cache	Index_Invalidate_I, 0x1(v0)	# 2 way
-    cache	Index_Invalidate_I, 0x2(v0)
-    cache	Index_Invalidate_I, 0x3(v0)	# 4 way
-4:	beq	$0, $0, 1b
-    addiu	v0, v0, 0x20
+    beq t3, 1, 4f
+    cache   Index_Invalidate_I, 0x0(v0) # 1 way
+    beq t3, 2, 4f
+    cache   Index_Invalidate_I, 0x1(v0) # 2 way
+    cache   Index_Invalidate_I, 0x2(v0)
+    cache   Index_Invalidate_I, 0x3(v0) # 4 way
+4:  beq $0, $0, 1b
+    addiu   v0, v0, 0x20
 1:
 cache_flush_d2way:
-    addu	v0, $0, a0
-    addu	v1, a0, a2
-1:	slt	a3, v0, v1
-    beq	a3, $0, 1f
+    addu    v0, $0, a0
+    addu    v1, a0, a2
+1:  slt a3, v0, v1
+    beq a3, $0, 1f
     nop
-    beq	t7, 1, 4f
-    cache	Index_Writeback_Inv_D, 0x0(v0) 	#1 way
-    beq	t7, 2, 4f
-    cache	Index_Writeback_Inv_D, 0x1(v0)	# 2 way
-    cache	Index_Writeback_Inv_D, 0x2(v0)
-    cache	Index_Writeback_Inv_D, 0x3(v0)	# 4 way
-4:	beq	$0, $0, 1b
-    addiu	v0, v0, 0x20
+    beq t7, 1, 4f
+    cache   Index_Writeback_Inv_D, 0x0(v0)  #1 way
+    beq t7, 2, 4f
+    cache   Index_Writeback_Inv_D, 0x1(v0)  # 2 way
+    cache   Index_Writeback_Inv_D, 0x2(v0)
+    cache   Index_Writeback_Inv_D, 0x3(v0)  # 4 way
+4:  beq $0, $0, 1b
+    addiu   v0, v0, 0x20
 1:
 cache_init_finish:
-    jr	t1
+    jr  t1
     nop
     .set reorder
     .end cache_init
@@ -139,80 +139,80 @@ cache_init_finish:
 
 LEAF(enable_cpu_cache)
     .set noreorder
-    mfc0	t0, CP0_CONFIG
+    mfc0    t0, CP0_CONFIG
     nop
-    and		t0, ~0x03
-    or		t0, 0x03
-    mtc0	t0, CP0_CONFIG
+    and     t0, ~0x03
+    or      t0, 0x03
+    mtc0    t0, CP0_CONFIG
     nop
     .set reorder
-    j	ra
+    j   ra
 END (enable_cpu_cache)
-    
+
 ###########################
 #  disable CPU cache      #
 ###########################
 
 LEAF(disable_cpu_cache)
     .set noreorder
-    mfc0	t0, CP0_CONFIG
+    mfc0    t0, CP0_CONFIG
     nop
-    and		t0, ~0x03
-    or 		t0, 0x2
-    mtc0	t0, CP0_CONFIG
+    and     t0, ~0x03
+    or      t0, 0x2
+    mtc0    t0, CP0_CONFIG
     nop
     .set reorder
-    j	ra
+    j   ra
 END (disable_cpu_cache)
 
 /**********************************/
-/* Invalidate Instruction Cache	  */
+/* Invalidate Instruction Cache   */
 /**********************************/
 LEAF(Clear_TagLo)
-    .set 	noreorder
-    mtc0	zero, CP0_TAGLO
+    .set    noreorder
+    mtc0    zero, CP0_TAGLO
     nop
-    .set 	reorder
-    j		ra
+    .set    reorder
+    j       ra
 END(Clear_TagLo)
 
     .set mips3
 /**********************************/
-/* Invalidate Instruction Cache	  */
+/* Invalidate Instruction Cache   */
 /**********************************/
 LEAF(Invalidate_Icache_Ls1c)
-    .set	noreorder
-    cache	Index_Invalidate_I,0(a0)
-    cache	Index_Invalidate_I,1(a0)
-    cache	Index_Invalidate_I,2(a0)
-    cache	Index_Invalidate_I,3(a0)
-    .set	reorder
-    j		ra
+    .set    noreorder
+    cache   Index_Invalidate_I,0(a0)
+    cache   Index_Invalidate_I,1(a0)
+    cache   Index_Invalidate_I,2(a0)
+    cache   Index_Invalidate_I,3(a0)
+    .set    reorder
+    j       ra
 END(Invalidate_Icache_Ls1c)
 
 /**********************************/
-/* Invalidate Data Cache		  */
+/* Invalidate Data Cache          */
 /**********************************/
 LEAF(Invalidate_Dcache_ClearTag_Ls1c)
-    .set	noreorder
-    cache	Index_Store_Tag_D, 0(a0)	# BDSLOT: clear tag
-    cache	Index_Store_Tag_D, 1(a0)	# BDSLOT: clear tag
-    .set	reorder
-    j		ra
+    .set    noreorder
+    cache   Index_Store_Tag_D, 0(a0)    # BDSLOT: clear tag
+    cache   Index_Store_Tag_D, 1(a0)    # BDSLOT: clear tag
+    .set    reorder
+    j       ra
 END(Invalidate_Dcache_ClearTag_Ls1c)
 
 LEAF(Invalidate_Dcache_Fill_Ls1c)
-    .set	noreorder
-    cache	Index_Writeback_Inv_D, 0(a0)	# BDSLOT: clear tag
-    cache	Index_Writeback_Inv_D, 1(a0)	# BDSLOT: clear tag
-    .set	reorder
-    j		ra
+    .set    noreorder
+    cache   Index_Writeback_Inv_D, 0(a0)    # BDSLOT: clear tag
+    cache   Index_Writeback_Inv_D, 1(a0)    # BDSLOT: clear tag
+    .set    reorder
+    j       ra
 END(Invalidate_Dcache_Fill_Ls1c)
 
 LEAF(Writeback_Invalidate_Dcache)
     .set noreorder
-    cache	Hit_Writeback_Inv_D, (a0)
+    cache   Hit_Writeback_Inv_D, (a0)
     .set reorder
-    j	ra
+    j   ra
 END(Writeback_Invalidate_Dcache)
     .set mips0

+ 3 - 3
libcpu/mips/gs232/cpuinit_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2019, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -19,5 +19,5 @@
 
     .globl  rt_cpu_early_init
 rt_cpu_early_init:
-     jr	ra
-     nop
+     jr ra
+     nop

+ 11 - 11
libcpu/mips/pic32/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2019, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -18,7 +18,7 @@
 #include "../common/stackframe.h"
 
     .section ".text", "ax"
-	.set 		noat
+    .set        noat
     .set noreorder
 
 /*
@@ -92,7 +92,7 @@ _reswitch:
     mfc0    t0, CP0_CAUSE                  /* t0 = Cause */
     ori     t0, t0, (1<<8)                 /* t0 |= (1<<8) */
     mtc0    t0, CP0_CAUSE                  /* cause = t0 */
-    addiu   t1,	zero,   -257               /* t1 = ~(1<<8) */
+    addiu   t1, zero,   -257               /* t1 = ~(1<<8) */
     and     t0, t0, t1                     /* t0 &= t1 */
     mtc0    t0, CP0_CAUSE                  /* cause = t0 */
     jr      ra
@@ -103,17 +103,17 @@ _reswitch:
  */
     .section ".text", "ax"
     .set noreorder
-	.set 		noat
- 	.ent		CoreSW0Handler
+    .set        noat
+    .ent        CoreSW0Handler
 
-	    .globl CoreSW0Handler
+        .globl CoreSW0Handler
 CoreSW0Handler:
     SAVE_ALL
 
-	/* mCS0ClearIntFlag(); */
-	la      t0, IFS0CLR             /* t0 = IFS0CLR */
-	addiu   t1,zero,0x02            /* t1 = (1<<2) */
-	sw      t1, 0(t0)               /* IFS0CLR = t1 */
+    /* mCS0ClearIntFlag(); */
+    la      t0, IFS0CLR             /* t0 = IFS0CLR */
+    addiu   t1,zero,0x02            /* t1 = (1<<2) */
+    sw      t1, 0(t0)               /* IFS0CLR = t1 */
 
     la      k0, rt_thread_switch_interrupt_flag
     sw      zero, 0(k0)                     /* clear flag */
@@ -133,4 +133,4 @@ CoreSW0Handler:
 
     RESTORE_ALL_AND_RET
 
-	.end		CoreSW0Handler
+    .end        CoreSW0Handler

+ 2 - 2
libcpu/risc-v/e310/interrupt_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -94,7 +94,7 @@ spurious_interrupt:
     li    t0, 0x00001800
     csrs  mstatus, t0
     LOAD  t0,   2 * REGBYTES(sp)
-    csrs  mstatus, t0 
+    csrs  mstatus, t0
 
     LOAD  x4,   4 * REGBYTES(sp)
     LOAD  x5,   5 * REGBYTES(sp)

+ 2 - 2
libcpu/risc-v/k210/startup_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -12,7 +12,7 @@
 #define MSTATUS_FS      0x00006000U /* initial state of FPU     */
 #include <cpuport.h>
 
-  .global	_start
+  .global   _start
   .section ".start", "ax"
 _start:
   j 1f

+ 2 - 2
libcpu/risc-v/rv32m1/interrupt_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -95,7 +95,7 @@ spurious_interrupt:
     li    t0, 0x00001800
     csrs  mstatus, t0
     LOAD  t0,   2 * REGBYTES(sp)
-    csrs  mstatus, t0 
+    csrs  mstatus, t0
 
     LOAD  x4,   4 * REGBYTES(sp)
     LOAD  x5,   5 * REGBYTES(sp)

+ 3 - 3
libcpu/risc-v/virt64/startup_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2018, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -11,13 +11,13 @@
  */
 
 #define XSTATUS_FS         (3 << 13) /* initial state of FPU, clear to disable */
-#define XSTATUS_PUM        (1 << 18) 
+#define XSTATUS_PUM        (1 << 18)
 #include <cpuport.h>
 
 boot_hartid: .int
   .global      boot_hartid
 
-  .global	_start
+  .global   _start
   .section ".start", "ax"
 _start:
 #ifdef RISCV_S_MODE

+ 59 - 59
libcpu/ti-dsp/c28x/context.s

@@ -1,5 +1,5 @@
 ;
-; Copyright (c) 2006-2018, RT-Thread Development Team
+; Copyright (c) 2006-2022, RT-Thread Development Team
 ;
 ; SPDX-License-Identifier: Apache-2.0
 ;
@@ -25,7 +25,7 @@
     .def   _rt_hw_interrupt_thread_switch
     .def   _rt_hw_interrupt_disable
     .def   _rt_hw_interrupt_enable
-    
+
 ;workaround for importing fpu settings from the compiler
     .cdecls C,NOLIST
     %{
@@ -37,7 +37,7 @@
     %}
 
 
-RT_CTX_SAVE  .macro      
+RT_CTX_SAVE  .macro
 
 
     PUSH    AR1H:AR0H
@@ -49,38 +49,38 @@ RT_CTX_SAVE  .macro
     PUSH    XAR7
     PUSH    XT
     PUSH    RPC
-    
+
     .if __FPU32__
-    PUSH	RB
-    MOV32	*SP++, STF
-    MOV32	*SP++, R0H
-    MOV32	*SP++, R1H
-    MOV32	*SP++, R2H
-    MOV32	*SP++, R3H
-    MOV32	*SP++, R4H
-    MOV32	*SP++, R5H
-    MOV32	*SP++, R6H
-    MOV32	*SP++, R7H
+    PUSH    RB
+    MOV32   *SP++, STF
+    MOV32   *SP++, R0H
+    MOV32   *SP++, R1H
+    MOV32   *SP++, R2H
+    MOV32   *SP++, R3H
+    MOV32   *SP++, R4H
+    MOV32   *SP++, R5H
+    MOV32   *SP++, R6H
+    MOV32   *SP++, R7H
     .endif
- 
+
     .endm
 
 
 RT_CTX_RESTORE  .macro
 
     .if __FPU32__
-    MOV32	R7H, *--SP, UNCF
-    MOV32	R6H, *--SP, UNCF
-    MOV32	R5H, *--SP, UNCF
-    MOV32	R4H, *--SP, UNCF
-    MOV32	R3H, *--SP, UNCF
-    MOV32	R2H, *--SP, UNCF
-    MOV32	R1H, *--SP, UNCF
-    MOV32	R0H, *--SP, UNCF
-    MOV32	STF, *--SP
-    POP		RB
+    MOV32   R7H, *--SP, UNCF
+    MOV32   R6H, *--SP, UNCF
+    MOV32   R5H, *--SP, UNCF
+    MOV32   R4H, *--SP, UNCF
+    MOV32   R3H, *--SP, UNCF
+    MOV32   R2H, *--SP, UNCF
+    MOV32   R1H, *--SP, UNCF
+    MOV32   R0H, *--SP, UNCF
+    MOV32   STF, *--SP
+    POP     RB
     .endif
-                                  
+
     POP     RPC
     POP     XT
     POP     XAR7
@@ -91,7 +91,7 @@ RT_CTX_RESTORE  .macro
     POP     XAR2
 
 
-    MOVZ    AR0 , @SP                                           
+    MOVZ    AR0 , @SP
     SUBB    XAR0, #6
     MOVL    ACC , *XAR0
     AND     ACC, #0xFFFF << 16
@@ -127,13 +127,13 @@ _rt_hw_interrupt_enable:
     POP   ST1
     LRETR
     .endasmfunc
-    
+
 ;
 ; void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 ; r0 --> from
 ; r4 --> to
 
- 
+
     .asmfunc
 _rt_hw_context_switch_interrupt:
     MOVL    XAR0, #0
@@ -169,7 +169,7 @@ _rt_hw_context_switch:
     MOVL    XAR0, #0
     MOV     AR0, AL
     MOVL    XAR4, *-SP[4]
-    ; set rt_thread_switch_interrupt_flag to 1 
+    ; set rt_thread_switch_interrupt_flag to 1
     MOVL    XAR5, #_rt_thread_switch_interrupt_flag
     MOVL    XAR6, *XAR5
     MOVL    ACC, XAR6
@@ -188,20 +188,20 @@ _reswitch2:
     TRAP    #16
     LRETR
     .endasmfunc
-     
+
      .asmfunc
 _RTOSINT_Handler:
-; disable interrupt to protect context switch 
+; disable interrupt to protect context switch
     DINT
 
-    ; get rt_thread_switch_interrupt_flag 
+    ; get rt_thread_switch_interrupt_flag
     MOV     AR0, #_rt_thread_switch_interrupt_flag
     MOV     AL, *AR0
     MOV     AR1, AL
     CMP     AR1, #0
-    B       rtosint_exit, EQ         ; pendsv already handled 
+    B       rtosint_exit, EQ         ; pendsv already handled
 
-    ; clear rt_thread_switch_interrupt_flag to 0 
+    ; clear rt_thread_switch_interrupt_flag to 0
     MOV     AR1, #0x00
     MOV     *AR0, AR1
 
@@ -209,22 +209,22 @@ _RTOSINT_Handler:
     MOV     AL, *AR0
     MOV     AR1, AL
     CMP     AR1, #0
-    B       switch_to_thread, EQ    ; skip register save at the first time 
-    
+    B       switch_to_thread, EQ    ; skip register save at the first time
+
 ;#if defined (__VFP_FP__) && !defined(__SOFTFP__)
-;    TST     lr, #0x10           ; if(!EXC_RETURN[4]) 
-;    VSTMDBEQ r1!, {d8 - d15}    ; push FPU register s16~s31 
+;    TST     lr, #0x10           ; if(!EXC_RETURN[4])
+;    VSTMDBEQ r1!, {d8 - d15}    ; push FPU register s16~s31
 ;#endif
-    
-    RT_CTX_SAVE     ; push r4 - r11 register 
+
+    RT_CTX_SAVE     ; push r4 - r11 register
 
 ;#if defined (__VFP_FP__) && !defined(__SOFTFP__)
-;    MOV     r4, #0x00           ; flag = 0 
+;    MOV     r4, #0x00           ; flag = 0
 
-;    TST     lr, #0x10           ; if(!EXC_RETURN[4]) 
-;    MOVEQ   r4, #0x01           ; flag = 1 
+;    TST     lr, #0x10           ; if(!EXC_RETURN[4])
+;    MOVEQ   r4, #0x01           ; flag = 1
 
-;    STMFD   r1!, {r4}           ; push flag 
+;    STMFD   r1!, {r4}           ; push flag
 ;#endif
 
     MOV     AL, *AR0
@@ -237,10 +237,10 @@ switch_to_thread:
     MOV     AL, *AR1
     MOV     AR1, AL
     MOV     AL, *AR1
-    MOV     AR1, AL                ; load thread stack pointer 
+    MOV     AR1, AL                ; load thread stack pointer
 
 ;#if defined (__VFP_FP__) && !defined(__SOFTFP__)
-;    LDMFD   r1!, {r3}           ; pop flag 
+;    LDMFD   r1!, {r3}           ; pop flag
 ;#endif
 
     MOV     @SP, AR1
@@ -248,7 +248,7 @@ switch_to_thread:
     RT_CTX_RESTORE     ; pop r4 - r11 register
 
 rtosint_exit:
-    ; restore interrupt 
+    ; restore interrupt
     EINT
 
     IRET
@@ -290,25 +290,25 @@ _rt_hw_calc_csb:
 ;
 ; * void rt_hw_context_switch_to(rt_uint32 to);
 ; * r0 --> to
- 
+
     .asmfunc
 _rt_hw_context_switch_to:
     MOV     AR1, #_rt_interrupt_to_thread
     MOV     *AR1, AL
 
 ;#if defined (__VFP_FP__) && !defined(__SOFTFP__)
-    ; CLEAR CONTROL.FPCA 
-;    MRS     r2, CONTROL         ; read 
-;    BIC     r2, #0x04           ; modify 
-;    MSR     CONTROL, r2         ; write-back 
+    ; CLEAR CONTROL.FPCA
+;    MRS     r2, CONTROL         ; read
+;    BIC     r2, #0x04           ; modify
+;    MSR     CONTROL, r2         ; write-back
 ;#endif
 
-    ; set from thread to 0 
+    ; set from thread to 0
     MOV     AR1, #_rt_interrupt_from_thread
     MOV     AR0, #0x0
     MOV     *AR1, AR0
 
-    ; set interrupt flag to 1 
+    ; set interrupt flag to 1
     MOV     AR1, #_rt_thread_switch_interrupt_flag
     MOV     AR0, #1
     MOV     *AR1, AR0
@@ -316,14 +316,14 @@ _rt_hw_context_switch_to:
     TRAP    #16
 
 
-    ; never reach here! 
+    ; never reach here!
     .endasmfunc
-    
-; compatible with old version 
+
+; compatible with old version
     .asmfunc
 _rt_hw_interrupt_thread_switch:
     LRETR
     NOP
     .endasmfunc
-    
+
 .end

+ 41 - 41
libcpu/unicore32/sep6200/context_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -14,7 +14,7 @@
 
 /*@{*/
 
-#define NOINT			0xc0
+#define NOINT           0xc0
 
 /*
  * rt_base_t rt_hw_interrupt_disable();
@@ -23,11 +23,11 @@
 .type rt_hw_interrupt_disable, %function
 rt_hw_interrupt_disable:
   stw.w   r1, [sp-], #4
-	mov    	r0, asr
-	or    	r1, r0, #NOINT
-	mov.a 	asr, r1
+    mov     r0, asr
+    or      r1, r0, #NOINT
+    mov.a   asr, r1
   ldw.w   r1, [sp]+, #4
-	mov	    pc, lr
+    mov     pc, lr
 
 /*
  * void rt_hw_interrupt_enable(rt_base_t level);
@@ -35,8 +35,8 @@ rt_hw_interrupt_disable:
 .globl rt_hw_interrupt_enable
 .type rt_hw_interrupt_disable, %function
 rt_hw_interrupt_enable:
-	mov.a asr, r0
-	mov pc, lr
+    mov.a asr, r0
+    mov pc, lr
 
 /*
  * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
@@ -46,24 +46,24 @@ rt_hw_interrupt_enable:
 .globl rt_hw_context_switch
 .type rt_hw_interrupt_disable, %function
 rt_hw_context_switch:
-	stm.w   (lr), [sp-]
-	stm.w	(r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, lr), [sp-]
-	stm.w	(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15),  [sp-]
-	mov	r4, asr
-	stm.w	(r4), [sp-]
-	mov 	r4, bsr
-	stm.w	(r4), [sp-]
+    stm.w   (lr), [sp-]
+    stm.w   (r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, lr), [sp-]
+    stm.w   (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15),  [sp-]
+    mov r4, asr
+    stm.w   (r4), [sp-]
+    mov     r4, bsr
+    stm.w   (r4), [sp-]
 
-	stw	sp, [r0+]
-	ldw	sp, [r1+]
+    stw sp, [r0+]
+    ldw sp, [r1+]
 
-	ldm.w	(r4), [sp]+
-	mov.a	bsr,r4
-	ldm.w	(r4), [sp]+
-	mov.a	asr, r4
+    ldm.w   (r4), [sp]+
+    mov.a   bsr,r4
+    ldm.w   (r4), [sp]+
+    mov.a   asr, r4
 
-	ldm.w	(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15), [sp]+
-	ldm.w	(r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, lr, pc), [sp]+
+    ldm.w   (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15), [sp]+
+    ldm.w   (r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, lr, pc), [sp]+
 
 /*
  * void rt_hw_context_switch_to(rt_uint32 to);
@@ -71,13 +71,13 @@ rt_hw_context_switch:
  */
 .globl rt_hw_context_switch_to
 rt_hw_context_switch_to:
-	ldw	sp, [r0+]
-	ldm.w	(r4), [sp]+
-	mov.a	bsr, r4
-	ldm.w	(r4), [sp]+
-	mov.a	asr, r4
-	ldm.w	(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15), [sp]+
-	ldm.w	(r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, lr, pc), [sp]+
+    ldw sp, [r0+]
+    ldm.w   (r4), [sp]+
+    mov.a   bsr, r4
+    ldm.w   (r4), [sp]+
+    mov.a   asr, r4
+    ldm.w   (r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15), [sp]+
+    ldm.w   (r16, r17, r18, r19, r20, r21, r22, r23, r24, r25, r26, r27, r28, lr, pc), [sp]+
 
 /*
  * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
@@ -87,15 +87,15 @@ rt_hw_context_switch_to:
 .globl rt_interrupt_to_thread
 .globl rt_hw_context_switch_interrupt
 rt_hw_context_switch_interrupt:
-	ldw r2, =rt_thread_switch_interrupt_flag
-	ldw r3, [r2+]
-	cmpsub.a r3, #1
-	beq _reswitch
-	mov r3, #1
-	stw r3, [r2+]
-	ldw r2, =rt_interrupt_from_thread
-	stw r0, [r2+]
+    ldw r2, =rt_thread_switch_interrupt_flag
+    ldw r3, [r2+]
+    cmpsub.a r3, #1
+    beq _reswitch
+    mov r3, #1
+    stw r3, [r2+]
+    ldw r2, =rt_interrupt_from_thread
+    stw r0, [r2+]
 _reswitch:
-	ldw r2, =rt_interrupt_to_thread
-	stw r1, [r2+]
-	mov pc, lr
+    ldw r2, =rt_interrupt_to_thread
+    stw r1, [r2+]
+    mov pc, lr

+ 162 - 162
libcpu/unicore32/sep6200/start_gcc.S

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2022, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -9,53 +9,53 @@
  */
 
 
-#define CONFIG_STACKSIZE 	1024
-#define S_FRAME_SIZE 		132
-
-#define S_OLD_R0 			132
-#define S_PSR  				128
-#define S_PC  				124
-#define S_LR  				120
-#define S_SP  				116
-
-#define S_IP  				112
-#define S_FP  				108
-#define S_R26  				104
-#define S_R25  				100
-#define S_R24  				96
-#define S_R23  				92
-#define S_R22  				88
-#define S_R21  				84
-#define S_R20 				80
-#define S_R19  				76
-#define S_R18  				72
-#define S_R17  				68
-#define S_R16  				64
-#define S_R15  				60
-#define S_R14  				56
-#define S_R13  				52
-#define S_R12  				48
-#define S_R11  				44
-#define S_R10  				40
-#define S_R9  				36
-#define S_R8  				32
-#define S_R7  				28
-#define S_R6  				24
-#define S_R5  				20
-#define S_R4  				16
-#define S_R3  				12
-#define S_R2  				8
-#define S_R1  				4
-#define S_R0 				0
-
-.equ 	USERMODE,			0x10
-.equ 	REALMODE,			0x11
-.equ 	IRQMODE,			0x12
-.equ 	PRIVMODE,			0x13
-.equ 	TRAPMODE,			0x17
-.equ 	EXTNMODE,			0x1b
-.equ 	MODEMASK,			0x1f
-.equ 	NOINT,				0xc0
+#define CONFIG_STACKSIZE    1024
+#define S_FRAME_SIZE        132
+
+#define S_OLD_R0            132
+#define S_PSR               128
+#define S_PC                124
+#define S_LR                120
+#define S_SP                116
+
+#define S_IP                112
+#define S_FP                108
+#define S_R26               104
+#define S_R25               100
+#define S_R24               96
+#define S_R23               92
+#define S_R22               88
+#define S_R21               84
+#define S_R20               80
+#define S_R19               76
+#define S_R18               72
+#define S_R17               68
+#define S_R16               64
+#define S_R15               60
+#define S_R14               56
+#define S_R13               52
+#define S_R12               48
+#define S_R11               44
+#define S_R10               40
+#define S_R9                36
+#define S_R8                32
+#define S_R7                28
+#define S_R6                24
+#define S_R5                20
+#define S_R4                16
+#define S_R3                12
+#define S_R2                8
+#define S_R1                4
+#define S_R0                0
+
+.equ    USERMODE,           0x10
+.equ    REALMODE,           0x11
+.equ    IRQMODE,            0x12
+.equ    PRIVMODE,           0x13
+.equ    TRAPMODE,           0x17
+.equ    EXTNMODE,           0x1b
+.equ    MODEMASK,           0x1f
+.equ    NOINT,              0xc0
 
 /*
  *************************************************************************
@@ -127,96 +127,96 @@ FIQ_STACK_START:
 
 .globl UNDEFINED_STACK_START
 UNDEFINED_STACK_START:
-	.word _undefined_stack_start + CONFIG_STACKSIZE
+    .word _undefined_stack_start + CONFIG_STACKSIZE
 
 .globl ABORT_STACK_START
 ABORT_STACK_START:
-	.word _abort_stack_start + CONFIG_STACKSIZE
+    .word _abort_stack_start + CONFIG_STACKSIZE
 
 .globl _STACK_START
 _STACK_START:
-	.word _priv_stack_start + 4096
+    .word _priv_stack_start + 4096
 
-.equ  SEP6200_VIC_BASE,		 0xb0000000
-.equ  SEP6200_SYSCTL_BASE,	 0xb0008000
+.equ  SEP6200_VIC_BASE,      0xb0000000
+.equ  SEP6200_SYSCTL_BASE,   0xb0008000
 /* ----------------------------------entry------------------------------*/
 reset:
-	/* set the cpu to PRIV mode and disable cpu interrupt */
-	mov		r0, asr
-	andn		r0, r0, #0xff
-	or		r0, r0, #PRIVMODE|NOINT
-	mov.a		asr, r0
+    /* set the cpu to PRIV mode and disable cpu interrupt */
+    mov     r0, asr
+    andn        r0, r0, #0xff
+    or      r0, r0, #PRIVMODE|NOINT
+    mov.a       asr, r0
 
-	/* mask all IRQs by clearing all bits in the INTMRs */
-	ldw	r1, =SEP6200_VIC_BASE
-	ldw 	r0, =0xffffffff
-	stw	r0, [r1+], #0x20 /*interrupt enable clear*/
-	stw	r0, [r1+], #0x24
+    /* mask all IRQs by clearing all bits in the INTMRs */
+    ldw r1, =SEP6200_VIC_BASE
+    ldw     r0, =0xffffffff
+    stw r0, [r1+], #0x20 /*interrupt enable clear*/
+    stw r0, [r1+], #0x24
 
 
-	/*remap ddr to 0x00000000 address*/
-	ldw	r1, =SEP6200_SYSCTL_BASE
-	ldw	r0, [r1+]
-	ldw	r2, =0x80000000
-	or	r0, r0, r2
-	stw	r2, [r1+]
+    /*remap ddr to 0x00000000 address*/
+    ldw r1, =SEP6200_SYSCTL_BASE
+    ldw r0, [r1+]
+    ldw r2, =0x80000000
+    or  r0, r0, r2
+    stw r2, [r1+]
 
-	/* set interrupt vector */
-	/*do nothing here for vector*/
+    /* set interrupt vector */
+    /*do nothing here for vector*/
 
-	/* setup stack */
-	b.l		stack_setup
+    /* setup stack */
+    b.l     stack_setup
 
   /* copy the vector code to address 0 */
-	ldw	r12, =0x100
-	ldw	r0, = 0x40000000
-	ldw	r1, = 0x00000000
+    ldw r12, =0x100
+    ldw r0, = 0x40000000
+    ldw r1, = 0x00000000
 copy_vetor:
-	ldw	r2, [r0]
-	stw	r2, [r1]
-	add	r0, r0, #4
-	add	r1, r1, #4
-	sub	r12, r12, #4
-	cmpsub.a	r12, #0
-	bne	copy_vetor
+    ldw r2, [r0]
+    stw r2, [r1]
+    add r0, r0, #4
+    add r1, r1, #4
+    sub r12, r12, #4
+    cmpsub.a    r12, #0
+    bne copy_vetor
 
-	/* clear .bss */
-	ldw   	r0, _bss_start         /* bss start   */
-	ldw   	r1, _bss_end           /* bss end     */
-	mov   	r2,#0                  /* get a zero  */
+    /* clear .bss */
+    ldw     r0, _bss_start         /* bss start   */
+    ldw     r1, _bss_end           /* bss end     */
+    mov     r2,#0                  /* get a zero  */
 
 
 bss_loop:
-	stw r2, [r0]            @ clear loop...
-	add r0, r0, #4
-	cmpsub.a    r0, r1
-	bel bss_loop
+    stw r2, [r0]            @ clear loop...
+    add r0, r0, #4
+    cmpsub.a    r0, r1
+    bel bss_loop
 
-	/* call C++ constructors of global objects 							*/
-	ldw	r0, =__ctors_start__
-	ldw	r1, =__ctors_end__
+    /* call C++ constructors of global objects                          */
+    ldw r0, =__ctors_start__
+    ldw r1, =__ctors_end__
 
 ctor_loop:
-	cmpsub.a	r0, r1
-	beq	ctor_end
-	ldw.w	r2, [r0]+, #4
-	stm.w	(r0, r1), [sp-]
-	add	lr, pc, #4
-	mov	pc, r2
-	ldm.w	(r0, r1), [sp]+
-	b ctor_loop
+    cmpsub.a    r0, r1
+    beq ctor_end
+    ldw.w   r2, [r0]+, #4
+    stm.w   (r0, r1), [sp-]
+    add lr, pc, #4
+    mov pc, r2
+    ldm.w   (r0, r1), [sp]+
+    b ctor_loop
 ctor_end:
 
   /*enable interrupt*/
-	mov 	r0, asr
-	andn 	r1, r0, #NOINT
-	mov.a 	asr, r1
+    mov     r0, asr
+    andn    r1, r0, #NOINT
+    mov.a   asr, r1
 
-	/* start RT-Thread Kernel */
-	ldw		pc, _rtthread_startup
+    /* start RT-Thread Kernel */
+    ldw     pc, _rtthread_startup
 
 _rtthread_startup:
-	.word rtthread_startup
+    .word rtthread_startup
 
 /*
  *************************************************************************
@@ -228,7 +228,7 @@ _rtthread_startup:
 
 /* exception handlers */
 /*Just simple implementation here */
-	.align  5
+    .align  5
 extend_handle:
     b rt_hw_trap_extn
 swi_handle:
@@ -240,84 +240,84 @@ dabort_handle:
 reserve_handle:
     b rt_hw_trap_resv
 
-.globl 		rt_interrupt_enter
-.globl 		rt_interrupt_leave
-.globl 		rt_thread_switch_interrupt_flag
-.globl 		rt_interrupt_from_thread
-.globl 		rt_interrupt_to_thread
+.globl      rt_interrupt_enter
+.globl      rt_interrupt_leave
+.globl      rt_thread_switch_interrupt_flag
+.globl      rt_interrupt_from_thread
+.globl      rt_interrupt_to_thread
 IRQ_handle:
 
   stm.w (lr), [sp-]
   stm.w (r16 - r28), [sp-]
   stm.w (r0 - r15), [sp-]
 
-	b.l		rt_interrupt_enter
-	b.l		rt_hw_trap_irq
-	b.l		rt_interrupt_leave
+    b.l     rt_interrupt_enter
+    b.l     rt_hw_trap_irq
+    b.l     rt_interrupt_leave
 
-	/* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
-	ldw		r0, =rt_thread_switch_interrupt_flag
-	ldw		r1, [r0+]
-	cmpsub.a	r1, #1
-	beq		_interrupt_thread_switch
+    /* if rt_thread_switch_interrupt_flag set, jump to _interrupt_thread_switch and don't return */
+    ldw     r0, =rt_thread_switch_interrupt_flag
+    ldw     r1, [r0+]
+    cmpsub.a    r1, #1
+    beq     _interrupt_thread_switch
 
   ldm.w (r0 - r15), [sp]+
   ldm.w (r16 - r28), [sp]+
   ldm.w (lr), [sp]+
   mov.a pc, lr
 
-	.align	5
+    .align  5
 FIQ_handle:
   b rt_hw_trap_fiq
 
 _interrupt_thread_switch:
 
-	mov		r1,  #0	/* clear rt_thread_switch_interrupt_flag*/
-	stw		r1,  [r0+]
+    mov     r1,  #0 /* clear rt_thread_switch_interrupt_flag*/
+    stw     r1,  [r0+]
 
-	/*reload register*/
+    /*reload register*/
   ldm.w (r0 - r15), [sp]+
   ldm.w (r16 - r28), [sp]+
   ldm.w (lr), [sp]+
 
-	stm.w	(r0 - r3), [sp-] /*save r0-r3*/
+    stm.w   (r0 - r3), [sp-] /*save r0-r3*/
 
-	mov		r1,  sp
-	add		sp,  sp, #16 /* restore sp */
-	mov		r2,  lr	/* save old task's pc to r2 */
+    mov     r1,  sp
+    add     sp,  sp, #16 /* restore sp */
+    mov     r2,  lr /* save old task's pc to r2 */
 
   mov r3, bsr
   mov r0, #0xd3 /*I:F:0:PRIV*/
   mov.a asr, r0
 
-	stm.w	(r2), [sp-] /* push old task's pc */
+    stm.w   (r2), [sp-] /* push old task's pc */
 
-	/* push old task's registers */
+    /* push old task's registers */
   stm.w (lr), [sp-]
   stm.w (r16 - r28), [sp-]
   stm.w (r4 - r15), [sp-]
-	mov		r4,  r1	/* Special optimised code below		*/
-  mov		r5,  r3
+    mov     r4,  r1 /* Special optimised code below     */
+  mov       r5,  r3
   ldm.w (r0 - r3), [r4]+
   stm.w (r0 - r3), [sp-] /*push old task's r3-r0*/
-	stm.w	(r5),	 [sp-] /* push old task's asr */
-	mov	r4, bsr
-	stm.w (r4), [sp-]	/* push old task's bsr*/
+    stm.w   (r5),    [sp-] /* push old task's asr */
+    mov r4, bsr
+    stm.w (r4), [sp-]   /* push old task's bsr*/
 
-	ldw		r4,  =rt_interrupt_from_thread
-	ldw		r5,  [r4+]
-	stw		sp,  [r5+] /* store sp in preempted tasks's TCB*/
+    ldw     r4,  =rt_interrupt_from_thread
+    ldw     r5,  [r4+]
+    stw     sp,  [r5+] /* store sp in preempted tasks's TCB*/
 
-	ldw	r6,  =rt_interrupt_to_thread
-	ldw	r6,  [r6+]
-	ldw	sp,  [r6+] /* get new task's stack pointer	*/
+    ldw r6,  =rt_interrupt_to_thread
+    ldw r6,  [r6+]
+    ldw sp,  [r6+] /* get new task's stack pointer  */
 
-	ldm.w	(r4), [sp]+	/* pop new task's spsr				*/
-	mov.a	bsr, r4
-	ldm.w	(r4), [sp]+	/* pop new task's psr				*/
-	mov.a	asr, r4
+    ldm.w   (r4), [sp]+ /* pop new task's spsr              */
+    mov.a   bsr, r4
+    ldm.w   (r4), [sp]+ /* pop new task's psr               */
+    mov.a   asr, r4
 
-	/* pop new task's r0-r28,lr & pc */
+    /* pop new task's r0-r28,lr & pc */
 
   ldm.w (r0 - r15), [sp]+
   ldm.w (r16 - r28), [sp]+
@@ -325,25 +325,25 @@ _interrupt_thread_switch:
   ldm.w (pc), [sp]+
 
 stack_setup:
-	/*irq*/
+    /*irq*/
   mov ip, lr
-	mov		r0, asr
-	andn  r0, r0, #0x1f
-	or		r0, r0, #IRQMODE|NOINT
-	mov.a		asr, r0 /*IRQMODE*/
+    mov     r0, asr
+    andn  r0, r0, #0x1f
+    or      r0, r0, #IRQMODE|NOINT
+    mov.a       asr, r0 /*IRQMODE*/
   ldw   r0, =IRQ_STACK_START
   ldw   sp, [r0+]
-	/*ldw		sp, IRQ_STACK_START*/
+    /*ldw       sp, IRQ_STACK_START*/
 
-	/*priv*/
-	mov		r0, asr
-	andn    	r0, r0, #0x1f
-	or		r0, r0, #PRIVMODE|NOINT
-	mov.a		asr, r0 /*PRIVMODE*/
+    /*priv*/
+    mov     r0, asr
+    andn        r0, r0, #0x1f
+    or      r0, r0, #PRIVMODE|NOINT
+    mov.a       asr, r0 /*PRIVMODE*/
   ldw   r0, =_STACK_START
   ldw   sp, [r0+]
-	/*ldw		sp, _STACK_START*/
+    /*ldw       sp, _STACK_START*/
   mov lr, ip
-	/*fiq and other mode is not implemented in code here*/
-	mov 		pc, lr /*lr may not be valid for the mode changes*/
+    /*fiq and other mode is not implemented in code here*/
+    mov         pc, lr /*lr may not be valid for the mode changes*/
 /*/*}*/