/*
 * cpuport.c — Cortex-M0+ (RP2040) CPU porting layer for RT-Thread.
 */
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2010-01-25 Bernard first version
  9. * 2012-05-31 aozima Merge all of the C source code into cpuport.c
  10. * 2012-08-17 aozima fixed bug: store r8 - r11.
  11. * 2012-12-23 aozima stack addr align to 8byte.
  12. * 2023-01-22 rose_man add RT_USING_SMP
  13. */
  14. #include <rtthread.h>
  15. #include <rthw.h>
  16. #include <rtthread.h>
  17. #include <stdint.h>
  18. #include "board.h"
  19. #ifdef RT_USING_SMP
  20. #include "hardware/structs/sio.h"
  21. #include "hardware/irq.h"
  22. #include "pico/sync.h"
  23. #include "pico/multicore.h"
  24. int rt_hw_cpu_id(void)
  25. {
  26. return sio_hw->cpuid;
  27. }
  28. void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
  29. {
  30. static uint8_t spin_cnt = 0;
  31. if ( spin_cnt < 32)
  32. {
  33. lock->slock = (rt_uint32_t)spin_lock_instance(spin_cnt);
  34. spin_cnt = spin_cnt + 1;
  35. }
  36. else
  37. {
  38. lock->slock = 0;
  39. }
  40. }
  41. void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
  42. {
  43. if ( lock->slock != 0 )
  44. {
  45. spin_lock_unsafe_blocking((spin_lock_t*)lock->slock);
  46. }
  47. }
  48. void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
  49. {
  50. if ( lock->slock != 0 )
  51. {
  52. spin_unlock_unsafe((spin_lock_t*)lock->slock);
  53. }
  54. }
/**
 * Entry point executed by core1 after multicore_launch_core1().
 * Enables this core's SIO FIFO interrupt, programs its tick timer,
 * then joins the RT-Thread scheduler (never returns).
 */
void secondary_cpu_c_start(void)
{
    /* accept IPIs from core0 through the SIO mailbox FIFO */
    irq_set_enabled(SIO_IRQ_PROC1,RT_TRUE);

    extern uint32_t systick_config(uint32_t ticks);
    /* Program SysTick from a measured ROSC frequency.
     * NOTE(review): frequency_count_khz() returns kHz, so Hz per tick would
     * be *1000/RT_TICK_PER_SECOND; the *10000 factor looks like a deliberate
     * calibration — confirm against the board's clock configuration. */
    systick_config(frequency_count_khz(CLOCKS_FC0_SRC_VALUE_ROSC_CLKSRC)*10000/RT_TICK_PER_SECOND);

    /* serialize with the other core before entering the scheduler */
    rt_hw_spin_lock(&_cpus_lock);
    rt_system_scheduler_start();
}
/**
 * Bring up the secondary core (core1) and enable core0's side of the
 * SIO FIFO interrupt so core0 can receive IPIs from core1.
 */
void rt_hw_secondary_cpu_up(void)
{
    /* core1 starts executing at secondary_cpu_c_start() */
    multicore_launch_core1(secondary_cpu_c_start);
    irq_set_enabled(SIO_IRQ_PROC0,RT_TRUE);
}
/**
 * Idle hook for the secondary core: sleep until the next interrupt.
 */
void rt_hw_secondary_cpu_idle_exec(void)
{
    asm volatile ("wfi");
}
/* payload word pushed through the SIO FIFO to signal an IPI */
#define IPI_MAGIC 0x5a5a
/**
 * Send an inter-processor interrupt by pushing a magic word into the
 * SIO mailbox FIFO (which always targets the opposite core).
 *
 * NOTE(review): ipi_vector and cpu_mask are ignored — presumably fine
 * with only two cores and a single vector, but confirm. The FIFO-full
 * case is not checked, so a flooded FIFO may drop the token — verify
 * this is acceptable for the scheduler's IPI usage.
 */
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask)
{
    sio_hw->fifo_wr = IPI_MAGIC;
}
/**
 * Common handler for an inter-processor interrupt delivered via the
 * SIO FIFO: clears sticky FIFO error flags and drains one pending word.
 */
void rt_hw_ipi_handler(void)
{
    uint32_t status = sio_hw->fifo_st;

    /* ROE/WOF are sticky error bits; writing FIFO_ST clears them */
    if ( status & (SIO_FIFO_ST_ROE_BITS | SIO_FIFO_ST_WOF_BITS) )
    {
        sio_hw->fifo_st = 0;
    }

    /* VLD: the receive FIFO holds at least one word — read it out */
    if ( status & SIO_FIFO_ST_VLD_BITS )
    {
        if ( sio_hw->fifo_rd == IPI_MAGIC )
        {
            /* NOTE(review): rescheduling on IPI is deliberately disabled
             * here — confirm whether it should be re-enabled */
            //rt_schedule();
        }
    }
}
/* IRQ 15 service routine — presumably core0's SIO FIFO interrupt
 * (SIO_IRQ_PROC0); forwards to the common IPI handler. */
void isr_irq15(void)
{
    rt_hw_ipi_handler();
}
/* IRQ 16 service routine — presumably core1's SIO FIFO interrupt
 * (SIO_IRQ_PROC1); forwards to the common IPI handler. */
void isr_irq16(void)
{
    rt_hw_ipi_handler();
}
/* Per-core bookkeeping for a pending context switch. */
struct __rt_thread_switch_array
{
rt_ubase_t flag; /* non-zero once a switch has been requested on this core */
rt_ubase_t from; /* presumably the saved-SP slot of the outgoing thread — confirm against the PendSV assembly */
rt_ubase_t to;   /* presumably the saved-SP slot of the incoming thread */
};
/* one slot per core, indexed by rt_hw_cpu_id() */
struct __rt_thread_switch_array rt_thread_switch_array[2] = { {0,0,0}, {0,0,0} };
/**
 * Record a pending context switch for the calling core and update the
 * per-CPU current-thread bookkeeping; the actual register save/restore
 * happens later, outside this function.
 *
 * @param from   switch source handed to the switch array (presumably the
 *               outgoing thread's saved-SP slot — confirm)
 * @param to     switch destination handed to the switch array
 * @param thread the thread that becomes current on this core
 */
void __rt_cpu_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *thread)
{
struct rt_cpu* pcpu = rt_cpu_self();
rt_uint32_t cpuid = rt_hw_cpu_id();
/* latch 'from' only for the first request; requests arriving before the
 * switch completes merely retarget 'to' */
if ( rt_thread_switch_array[cpuid].flag != 1)
{
rt_thread_switch_array[cpuid].flag = 1;
rt_thread_switch_array[cpuid].from = from;
}
rt_thread_switch_array[cpuid].to = to;
/* carry the lock-nesting counters from the outgoing thread over to the
 * incoming one so nesting state survives the switch */
if ( pcpu->current_thread != RT_NULL )
{
thread->cpus_lock_nest = pcpu->current_thread->cpus_lock_nest;
thread->critical_lock_nest = pcpu->current_thread->critical_lock_nest;
thread->scheduler_lock_nest = pcpu->current_thread->scheduler_lock_nest;
}
pcpu->current_thread = thread;
/* drop the global cpus lock when the incoming thread does not hold it */
if (!thread->cpus_lock_nest)
{
rt_hw_spin_unlock(&_cpus_lock);
}
}
  129. #endif /*RT_USING_SMP*/
/* Registers pushed automatically by the Cortex-M core on exception
 * entry, in hardware stacking order (lowest address first). */
struct exception_stack_frame
{
rt_uint32_t r0;
rt_uint32_t r1;
rt_uint32_t r2;
rt_uint32_t r3;
rt_uint32_t r12;
rt_uint32_t lr;  /* link register at the time of the exception */
rt_uint32_t pc;  /* return address */
rt_uint32_t psr; /* program status register */
};
/* Full software-saved thread context: callee-saved r4-r11 stored by the
 * context-switch code, followed by the hardware-stacked exception frame. */
struct stack_frame
{
/* r4 ~ r7 low register */
rt_uint32_t r4;
rt_uint32_t r5;
rt_uint32_t r6;
rt_uint32_t r7;
/* r8 ~ r11 high register */
rt_uint32_t r8;
rt_uint32_t r9;
rt_uint32_t r10;
rt_uint32_t r11;
struct exception_stack_frame exception_stack_frame;
};
/* flag in interrupt handling */
/* from/to values for a context switch requested from interrupt context
 * (presumably consumed by the PendSV handler in the non-SMP path — confirm) */
rt_uint32_t rt_interrupt_from_thread, rt_interrupt_to_thread;
/* non-zero while a thread switch is pending in interrupt context */
rt_uint32_t rt_thread_switch_interrupt_flag;
  158. /**
  159. * This function will initialize thread stack
  160. *
  161. * @param tentry the entry of thread
  162. * @param parameter the parameter of entry
  163. * @param stack_addr the beginning stack address
  164. * @param texit the function will be called when thread exit
  165. *
  166. * @return stack address
  167. */
  168. rt_uint8_t *rt_hw_stack_init(void *tentry,
  169. void *parameter,
  170. rt_uint8_t *stack_addr,
  171. void *texit)
  172. {
  173. struct stack_frame *stack_frame;
  174. rt_uint8_t *stk;
  175. unsigned long i;
  176. stk = stack_addr + sizeof(rt_uint32_t);
  177. stk = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stk, 8);
  178. stk -= sizeof(struct stack_frame);
  179. stack_frame = (struct stack_frame *)stk;
  180. /* init all register */
  181. for (i = 0; i < sizeof(struct stack_frame) / sizeof(rt_uint32_t); i ++)
  182. {
  183. ((rt_uint32_t *)stack_frame)[i] = 0xdeadbeef;
  184. }
  185. stack_frame->exception_stack_frame.r0 = (unsigned long)parameter; /* r0 : argument */
  186. stack_frame->exception_stack_frame.r1 = 0; /* r1 */
  187. stack_frame->exception_stack_frame.r2 = 0; /* r2 */
  188. stack_frame->exception_stack_frame.r3 = 0; /* r3 */
  189. stack_frame->exception_stack_frame.r12 = 0; /* r12 */
  190. stack_frame->exception_stack_frame.lr = (unsigned long)texit; /* lr */
  191. stack_frame->exception_stack_frame.pc = (unsigned long)tentry; /* entry point, pc */
  192. stack_frame->exception_stack_frame.psr = 0x01000000L; /* PSR */
  193. /* return task's current stack address */
  194. return stk;
  195. }
  196. #if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
  197. extern long list_thread(void);
  198. #endif
  199. extern rt_thread_t rt_current_thread;
  200. /**
  201. * fault exception handling
  202. */
  203. void rt_hw_hard_fault_exception(struct exception_stack_frame *contex)
  204. {
  205. rt_kprintf("psr: 0x%08x\n", contex->psr);
  206. rt_kprintf(" pc: 0x%08x\n", contex->pc);
  207. rt_kprintf(" lr: 0x%08x\n", contex->lr);
  208. rt_kprintf("r12: 0x%08x\n", contex->r12);
  209. rt_kprintf("r03: 0x%08x\n", contex->r3);
  210. rt_kprintf("r02: 0x%08x\n", contex->r2);
  211. rt_kprintf("r01: 0x%08x\n", contex->r1);
  212. rt_kprintf("r00: 0x%08x\n", contex->r0);
  213. #ifdef RT_USING_SMP
  214. rt_thread_t rt_current_thread = rt_thread_self();
  215. rt_kprintf("hard fault on cpu : %d on thread: %s\n", rt_current_thread->oncpu, rt_current_thread->parent.name);
  216. #else
  217. rt_kprintf("hard fault on thread: %s\n", rt_current_thread->parent.name);
  218. #endif
  219. #if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
  220. list_thread();
  221. #endif
  222. while (1);
  223. }
/* Cortex-M System Control Block fault/status registers (memory-mapped). */
#define SCB_CFSR (*(volatile const unsigned *)0xE000ED28) /* Configurable Fault Status Register */
#define SCB_HFSR (*(volatile const unsigned *)0xE000ED2C) /* HardFault Status Register */
#define SCB_MMAR (*(volatile const unsigned *)0xE000ED34) /* MemManage Fault Address register */
#define SCB_BFAR (*(volatile const unsigned *)0xE000ED38) /* Bus Fault Address Register */
#define SCB_AIRCR (*(volatile unsigned long *)0xE000ED0C) /* Reset control Address Register */
#define SCB_RESET_VALUE 0x05FA0004 /* Reset value, write to SCB_AIRCR can reset cpu */
/* byte/halfword views into CFSR */
#define SCB_CFSR_MFSR (*(volatile const unsigned char*)0xE000ED28) /* Memory-management Fault Status Register */
#define SCB_CFSR_BFSR (*(volatile const unsigned char*)0xE000ED29) /* Bus Fault Status Register */
#define SCB_CFSR_UFSR (*(volatile const unsigned short*)0xE000ED2A) /* Usage Fault Status Register */
  233. /**
  234. * reset CPU
  235. */
  236. rt_weak void rt_hw_cpu_reset(void)
  237. {
  238. SCB_AIRCR = SCB_RESET_VALUE;//((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |SCB_AIRCR_SYSRESETREQ_Msk);
  239. }