lwp.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
  9. * 2018-11-02 heyuanjie fix complie error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. */
  13. #include <rthw.h>
  14. #include <rtthread.h>
  15. #include <dfs_posix.h>
  16. #include <lwp_elf.h>
  17. #ifndef RT_USING_DFS
  18. #error "lwp need file system(RT_USING_DFS)"
  19. #endif
  20. #include "lwp.h"
  21. #include "lwp_arch.h"
  22. #include "lwp_arch_comm.h"
  23. #include "console.h"
  24. #define DBG_TAG "LWP"
  25. #define DBG_LVL DBG_WARNING
  26. #include <rtdbg.h>
  27. #ifdef RT_USING_USERSPACE
  28. #include <lwp_mm_area.h>
  29. #include <lwp_user_mm.h>
  30. #endif /* end of RT_USING_USERSPACE */
  31. static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
  32. #ifdef DFS_USING_WORKDIR
  33. extern char working_directory[];
  34. #endif
  35. static struct termios stdin_termios, old_stdin_termios;
  36. int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
/* Return a pointer to the file-scope backup copy of stdin's termios
 * settings (old_stdin_termios) so callers can restore them. */
struct termios *get_old_termios(void)
{
    return &old_stdin_termios;
}
  41. void lwp_setcwd(char *buf)
  42. {
  43. struct rt_lwp *lwp = RT_NULL;
  44. if(strlen(buf) >= DFS_PATH_MAX)
  45. {
  46. rt_kprintf("buf too long!\n");
  47. return ;
  48. }
  49. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  50. if (lwp)
  51. {
  52. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
  53. }
  54. else
  55. {
  56. rt_strncpy(working_directory, buf, DFS_PATH_MAX);
  57. }
  58. return ;
  59. }
  60. char *lwp_getcwd(void)
  61. {
  62. char *dir_buf = RT_NULL;
  63. struct rt_lwp *lwp = RT_NULL;
  64. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  65. if (lwp)
  66. {
  67. if(lwp->working_directory[0] != '/')
  68. {
  69. dir_buf = &working_directory[0];
  70. }
  71. else
  72. {
  73. dir_buf = &lwp->working_directory[0];
  74. }
  75. }
  76. else
  77. dir_buf = &working_directory[0];
  78. return dir_buf;
  79. }
  80. /**
  81. * RT-Thread light-weight process
  82. */
  83. void lwp_set_kernel_sp(uint32_t *sp)
  84. {
  85. rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
  86. }
/**
 * Fetch the kernel stack pointer to use when entering kernel mode.
 *
 * With RT_USING_USERSPACE the current thread's saved sp is returned
 * directly.  Otherwise, when a context switch is pending inside an
 * interrupt (rt_thread_switch_interrupt_flag set), the kernel sp of
 * the thread being switched away from is returned instead of the
 * current thread's.
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef RT_USING_USERSPACE
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;
    if (rt_thread_switch_interrupt_flag)
    {
        /* rt_interrupt_from_thread holds the address of thread->sp;
         * recover the owning rt_thread to read its kernel_sp */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
  106. #ifdef RT_USING_USERSPACE
/**
 * Copy argc/argv/envp plus the ELF auxiliary vector into a page mapped
 * at the top of the new process' user address space (lwp->args).
 *
 * The page is laid out as:
 *   argc | argv pointers | NULL | envp pointers | NULL | aux items | NULL
 * followed by the string data itself.  The user page is written through
 * its kernel-visible alias (args_k/str_k).
 *
 * @return kernel-visible pointer to the aux vector inside that page, or
 *         RT_NULL when the content exceeds one page or mapping fails.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;          /* user-space address where strings are stored */
    char *str_k;        /* kernel-visible alias of str */
    char **new_argve;
    int i;
    int len;
    size_t *args_k;     /* kernel-visible alias of args */
    struct process_aux *aux;

    /* account for each argument string (incl. nul terminator) */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);

    /* account for the environment strings and pointer slots;
     * i ends up holding the number of envp entries */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }

    /* for aux */
    size += sizeof(struct process_aux);

    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }

    /* args = (int *)lwp_map_user(lwp, 0, size); */
    args = (int *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE), size, 0);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* translate the user page to its kernel-visible virtual address */
    args_k = (size_t *)rt_hw_mmu_v2p(&lwp->mmu_info, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);

    /* argc, argv[], 0, envp[], 0 , aux[] */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));

    new_argve = (char **)&args_k[1];
    args_k[0] = argc;

    /* copy argv strings; the pointers stored are user addresses */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;   /* argv terminator */
    i++;

    new_argve[i] = 0;   /* envp terminator slot (overwritten below when envp is non-empty) */
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;
    }
    i++;

    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    /* make the freshly written page visible before user mode runs */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);

    lwp->args = args;

    return aux;
}
  188. #else
/**
 * Copy argc/argv/envp (and, with ARCH_MM_MMU, an aux vector) into a
 * heap buffer stored at lwp->args.
 *
 * Layout: argc | argv pointers | NULL | envp pointers | NULL [| aux],
 * followed by the string data.
 *
 * @return pointer to the aux vector area at the end of the pointer
 *         table, or RT_NULL on allocation failure.
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;

    /* account for each argument string (incl. nul terminator) */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);

    /* account for environment strings; i ends up = number of envp entries */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }

#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);

    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */

    new_argve = (char **)&args[1];
    args[0] = argc;

    /* copy argv strings */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;   /* argv terminator */
    i++;

    new_argve[i] = 0;   /* envp terminator slot (overwritten below when envp is non-empty) */
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;
    }

#ifdef ARCH_MM_MMU
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    lwp->args = args;

    return aux;
#else
    lwp->args = args;
    lwp->args_length = size;

    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
  275. #endif
  276. #ifdef ARCH_MM_MMU
/* Abort load_elf with -RT_ERROR when file offset `voff` lies past the
 * end of the file (`vlen`).  Relies on a local `result` variable and
 * an `_exit` cleanup label existing in the enclosing function. */
#define check_off(voff, vlen) \
do \
{ \
    if (voff > vlen) \
    { \
        result = -RT_ERROR; \
        goto _exit; \
    } \
} while (0)

/* Abort load_elf with -RT_ERROR on a short read: `vrlen` is the byte
 * count actually read, `vrlen_want` the expected count.  Same scope
 * requirements as check_off. */
#define check_read(vrlen, vrlen_want) \
do \
{ \
    if (vrlen < vrlen_want) \
    { \
        result = -RT_ERROR; \
        goto _exit; \
    } \
} while (0)
  295. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  296. {
  297. size_t read_block = 0;
  298. while (nmemb)
  299. {
  300. size_t count;
  301. count = read(fd, ptr, size * nmemb) / size;
  302. if (count < nmemb)
  303. {
  304. LOG_E("ERROR: file size error!");
  305. break;
  306. }
  307. ptr = (void *)((uint8_t *)ptr + (count * size));
  308. nmemb -= count;
  309. read_block += count;
  310. }
  311. return read_block;
  312. }
/* Symbol table entry as read from the .dynsym section, using the
 * Elf_* width typedefs so the same code serves 32- and 64-bit images. */
typedef struct
{
    Elf_Word st_name;       /* symbol name (string table offset) */
    Elf_Addr st_value;      /* symbol value (address) */
    Elf_Word st_size;       /* symbol size in bytes */
    unsigned char st_info;  /* type and binding attributes */
    unsigned char st_other; /* visibility */
    Elf_Half st_shndx;      /* defining section index */
} Elf_sym;
  322. #ifdef RT_USING_USERSPACE
  323. struct map_range
  324. {
  325. void *start;
  326. size_t size;
  327. };
  328. static void expand_map_range(struct map_range *m, void *start, size_t size)
  329. {
  330. if (!m->start)
  331. {
  332. m->start = start;
  333. m->size = size;
  334. }
  335. else
  336. {
  337. void *end = (void *)((char*)start + size);
  338. void *mend = (void *)((char*)m->start + m->size);
  339. if (m->start > start)
  340. {
  341. m->start = start;
  342. }
  343. if (mend < end)
  344. {
  345. mend = end;
  346. }
  347. m->size = (char *)mend - (char *)m->start;
  348. }
  349. }
  350. static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
  351. {
  352. void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
  353. void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  354. void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
  355. void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  356. if (m1->size)
  357. {
  358. if (m1_start < (void *)USER_LOAD_VADDR)
  359. {
  360. return -1;
  361. }
  362. if (m1_start > (void *)USER_STACK_VSTART)
  363. {
  364. return -1;
  365. }
  366. if (m1_end < (void *)USER_LOAD_VADDR)
  367. {
  368. return -1;
  369. }
  370. if (m1_end > (void *)USER_STACK_VSTART)
  371. {
  372. return -1;
  373. }
  374. }
  375. if (m2->size)
  376. {
  377. if (m2_start < (void *)USER_LOAD_VADDR)
  378. {
  379. return -1;
  380. }
  381. if (m2_start > (void *)USER_STACK_VSTART)
  382. {
  383. return -1;
  384. }
  385. if (m2_end < (void *)USER_LOAD_VADDR)
  386. {
  387. return -1;
  388. }
  389. if (m2_end > (void *)USER_STACK_VSTART)
  390. {
  391. return -1;
  392. }
  393. }
  394. if ((m1->size != 0) && (m2->size != 0))
  395. {
  396. if (m1_start < m2_start)
  397. {
  398. if (m1_end > m2_start)
  399. {
  400. return -1;
  401. }
  402. }
  403. else /* m2_start <= m1_start */
  404. {
  405. if (m2_end > m1_start)
  406. {
  407. return -1;
  408. }
  409. }
  410. }
  411. return 0;
  412. }
  413. #endif
/**
 * Load an ELF image from an already opened file into the lwp.
 *
 * @param fd        file descriptor of the ELF image
 * @param len       total file length in bytes
 * @param lwp       target process control block
 * @param load_addr fixed load base, or NULL to place/allocate one
 * @param aux       auxiliary vector to fill (AT_PAGESZ, AT_RANDOM,
 *                  AT_PHDR, AT_PHNUM, AT_PHENT)
 *
 * @return RT_EOK on success; 1 when the image contains a PT_DYNAMIC
 *         segment and must be started through the dynamic loader
 *         (RT_USING_USERSPACE builds); a negative error code otherwise.
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;    /* section-name string table */
    Elf_sym *dynsym = 0;        /* copy of the .dynsym section */
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef RT_USING_USERSPACE
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;              /* kernel-visible alias of a user page */
    rt_mmu_info *m_info = &lwp->mmu_info;
#endif

    if (len < sizeof eheader)
    {
        LOG_E("len < sizeof eheader!");
        return -RT_ERROR;
    }

    /* verify the 4-byte ELF magic first */
    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        LOG_E("elf_magic not same, magic:0x%x!", magic);
        return -RT_ERROR;
    }

    /* read the full ELF header */
    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);

    /* e_ident[EI_CLASS] must match the build's word size */
#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#endif

    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        LOG_E("elf Version not 1,ver:%d!", eheader.e_ident[6]);
        return -RT_ERROR;
    }

    if ((eheader.e_type != ET_DYN)
#ifdef RT_USING_USERSPACE
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
        return -RT_ERROR;
    }

#ifdef RT_USING_USERSPACE
    {
        /* scan program headers: a PT_DYNAMIC segment means this image
         * must be started through ld.so instead of being loaded here */
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);

            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif

    if (eheader.e_entry != 0)
    {
        /* a non-zero entry must be one of the two known load bases */
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalidate */
            LOG_E("elf entry is invalidate, entry:0x%x!", eheader.e_entry);
            return -RT_ERROR;
        }
    }

    { /* load aux */
        uint8_t *process_header;
        size_t process_header_size;

        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef RT_USING_USERSPACE
        /* the program headers share a page with the 16-byte AT_RANDOM
         * area, so they must leave room for it */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            LOG_E("process_header_size too big, size:0x%x!", process_header_size);
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            LOG_E("lwp map user failed!");
            return -RT_ERROR;
        }
        pa = rt_hw_mmu_v2p(m_info, va);
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef RT_USING_USERSPACE
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif

        aux->item[1].key = AT_PAGESZ;
#ifdef RT_USING_USERSPACE
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif

        /* AT_RANDOM: 16 bytes seeded from the tick counter
         * (NOTE(review): only sizeof(uint32_t) of the 16 bytes are
         * actually written -- confirm whether that is intended) */
        aux->item[2].key = AT_RANDOM;
        {
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef RT_USING_USERSPACE
            uint8_t *krandom;

            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)rt_hw_mmu_v2p(m_info, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }

        /* AT_PHDR points at the copy made above (user address when
         * RT_USING_USERSPACE) */
        aux->item[3].key = AT_PHDR;
#ifdef RT_USING_USERSPACE
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef RT_USING_USERSPACE
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }

    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef RT_USING_USERSPACE
    else
    {
        /* map user */
        off = eheader.e_shoff;
        /* walk the section headers and accumulate the text range
         * (read-only PROGBITS) and the data range (everything else
         * that is SHF_ALLOC) */
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }

        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }

        if (user_area[0].start == NULL)
        {
            /* DYN: position-independent image, relocate both ranges to
             * the default user load address */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }

        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }

        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (int)(i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;

        off = eheader.e_shoff;
        /* find the lowest/highest addresses of allocatable sections */
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }

        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif

    lwp->text_entry = (void *)(eheader.e_entry + load_off);

    /* copy every PT_LOAD segment into place */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);

        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
                return -RT_ERROR;
            }

            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef RT_USING_USERSPACE
            {
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;

                /* NOTE(review): this adds load_addr while the zero-fill
                 * below adds load_off -- confirm both are intended */
                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                /* copy page by page through the kernel alias of each
                 * mapped user page, flushing the dcache as we go */
                while (size)
                {
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);

            if (pheader.p_filesz < pheader.p_memsz)
            {
                /* zero-fill the bss part (p_memsz beyond p_filesz) */
#ifdef RT_USING_USERSPACE
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;

                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }

    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);

        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }
        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);

        /* locate .got, .rel.dyn and .dynsym by name */
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }
        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef RT_USING_USERSPACE
        arch_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);

        /* text was written through the data cache: flush it and drop
         * any stale instruction-cache contents */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);

_exit:
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  854. #endif /* ARCH_MM_MMU */
  855. RT_WEAK int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  856. {
  857. uint8_t *ptr;
  858. int ret = -1;
  859. int len;
  860. int fd = -1;
  861. /* check file name */
  862. RT_ASSERT(filename != RT_NULL);
  863. /* check lwp control block */
  864. RT_ASSERT(lwp != RT_NULL);
  865. /* copy file name to process name */
  866. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  867. if (load_addr != RT_NULL)
  868. {
  869. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  870. ptr = load_addr;
  871. }
  872. else
  873. {
  874. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  875. ptr = RT_NULL;
  876. }
  877. fd = open(filename, O_BINARY | O_RDONLY, 0);
  878. if (fd < 0)
  879. {
  880. LOG_E("ERROR: Can't open elf file %s!", filename);
  881. goto out;
  882. }
  883. len = lseek(fd, 0, SEEK_END);
  884. if (len < 0)
  885. {
  886. LOG_E("ERROR: File %s size error!", filename);
  887. goto out;
  888. }
  889. lseek(fd, 0, SEEK_SET);
  890. ret = load_elf(fd, len, lwp, ptr, aux);
  891. if ((ret != RT_EOK) && (ret != 1))
  892. {
  893. LOG_E("lwp load ret = %d", ret);
  894. }
  895. out:
  896. if (fd > 0)
  897. {
  898. close(fd);
  899. }
  900. return ret;
  901. }
  902. void lwp_cleanup(struct rt_thread *tid)
  903. {
  904. rt_base_t level;
  905. struct rt_lwp *lwp;
  906. struct tty_node *tty_head = RT_NULL;
  907. if (tid == NULL)
  908. {
  909. return;
  910. }
  911. LOG_I("cleanup thread: %s, stack_addr: %08X", tid->name, tid->stack_addr);
  912. level = rt_hw_interrupt_disable();
  913. lwp = (struct rt_lwp *)tid->lwp;
  914. lwp_tid_put(tid->tid);
  915. rt_list_remove(&tid->sibling);
  916. rt_hw_interrupt_enable(level);
  917. if (lwp->tty != RT_NULL)
  918. {
  919. tty_head = lwp->tty->head;
  920. }
  921. if (!lwp_ref_dec(lwp))
  922. {
  923. if (tty_head)
  924. {
  925. tty_pop(&tty_head, lwp);
  926. }
  927. }
  928. return;
  929. }
  930. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  931. {
  932. struct dfs_fd *d;
  933. struct dfs_fdtable *lwp_fdt;
  934. lwp_fdt = &lwp->fdt;
  935. /* init 4 fds */
  936. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  937. if (lwp_fdt->fds)
  938. {
  939. lwp_fdt->maxfd = 4;
  940. d = fd_get(0);
  941. fd_associate(lwp_fdt, 0, d);
  942. d = fd_get(1);
  943. fd_associate(lwp_fdt, 1, d);
  944. d = fd_get(2);
  945. fd_associate(lwp_fdt, 2, d);
  946. }
  947. return;
  948. }
/* First function run by a new lwp thread: installs the cleanup hook,
 * optionally plants a debugger breakpoint at the process entry point,
 * then drops to user mode. */
static void _lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

    if (lwp->debug)
    {
        /* save the first instruction and overwrite it with the debug
         * breakpoint instruction so execution stops at entry; flush
         * the dcache and invalidate the icache so the CPU sees it */
        lwp->bak_first_ins = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }

#ifdef ARCH_MM_MMU
    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
#else
    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
  970. struct rt_lwp *lwp_self(void)
  971. {
  972. rt_thread_t tid;
  973. tid = rt_thread_self();
  974. if (tid)
  975. {
  976. return (struct rt_lwp *)tid->lwp;
  977. }
  978. return RT_NULL;
  979. }
  980. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  981. {
  982. int result;
  983. rt_base_t level;
  984. struct rt_lwp *lwp;
  985. char *thread_name;
  986. char *argv_last = argv[argc - 1];
  987. int bg = 0;
  988. struct process_aux *aux;
  989. int tid = 0;
  990. int ret;
  991. if (filename == RT_NULL)
  992. {
  993. return -RT_ERROR;
  994. }
  995. lwp = lwp_new();
  996. if (lwp == RT_NULL)
  997. {
  998. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  999. return -RT_ENOMEM;
  1000. }
  1001. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  1002. if ((tid = lwp_tid_get()) == 0)
  1003. {
  1004. lwp_ref_dec(lwp);
  1005. return -ENOMEM;
  1006. }
  1007. #ifdef RT_USING_USERSPACE
  1008. if (lwp_user_space_init(lwp) != 0)
  1009. {
  1010. lwp_tid_put(tid);
  1011. lwp_ref_dec(lwp);
  1012. return -ENOMEM;
  1013. }
  1014. #endif
  1015. if (argv_last[0] == '&' && argv_last[1] == '\0')
  1016. {
  1017. argc--;
  1018. bg = 1;
  1019. }
  1020. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  1021. {
  1022. lwp_tid_put(tid);
  1023. lwp_ref_dec(lwp);
  1024. return -ENOMEM;
  1025. }
  1026. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  1027. #ifdef ARCH_MM_MMU
  1028. if (result == 1)
  1029. {
  1030. /* dynmaic */
  1031. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  1032. result = load_ldso(lwp, filename, argv, envp);
  1033. }
  1034. #endif /* ARCH_MM_MMU */
  1035. if (result == RT_EOK)
  1036. {
  1037. rt_thread_t thread = RT_NULL;
  1038. rt_uint32_t priority = 25, tick = 200;
  1039. lwp_copy_stdio_fdt(lwp);
  1040. /* obtain the base name */
  1041. thread_name = strrchr(filename, '/');
  1042. thread_name = thread_name ? thread_name + 1 : filename;
  1043. #ifndef ARCH_MM_MMU
  1044. struct lwp_app_head *app_head = lwp->text_entry;
  1045. if (app_head->priority)
  1046. {
  1047. priority = app_head->priority;
  1048. }
  1049. if (app_head->tick)
  1050. {
  1051. tick = app_head->tick;
  1052. }
  1053. #endif /* not defined ARCH_MM_MMU */
  1054. thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
  1055. LWP_TASK_STACK_SIZE, priority, tick);
  1056. if (thread != RT_NULL)
  1057. {
  1058. struct rt_lwp *self_lwp;
  1059. thread->tid = tid;
  1060. lwp_tid_set_thread(tid, thread);
  1061. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
  1062. (rt_size_t)thread->stack_addr + thread->stack_size);
  1063. level = rt_hw_interrupt_disable();
  1064. self_lwp = lwp_self();
  1065. if (self_lwp)
  1066. {
  1067. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1068. lwp->__pgrp = tid;
  1069. lwp->session = self_lwp->session;
  1070. /* lwp add to children link */
  1071. lwp->sibling = self_lwp->first_child;
  1072. self_lwp->first_child = lwp;
  1073. lwp->parent = self_lwp;
  1074. }
  1075. else
  1076. {
  1077. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1078. lwp->__pgrp = tid;
  1079. }
  1080. if (!bg)
  1081. {
  1082. if (lwp->session == -1)
  1083. {
  1084. struct tty_struct *tty = RT_NULL;
  1085. struct rt_lwp *old_lwp;
  1086. tty = (struct tty_struct *)console_tty_get();
  1087. old_lwp = tty->foreground;
  1088. rt_spin_lock(&tty->spinlock);
  1089. ret = tty_push(&tty->head, old_lwp);
  1090. rt_spin_unlock(&tty->spinlock);
  1091. if (ret < 0)
  1092. {
  1093. lwp_tid_put(tid);
  1094. lwp_ref_dec(lwp);
  1095. LOG_E("malloc fail!\n");
  1096. return -ENOMEM;
  1097. }
  1098. lwp->tty = tty;
  1099. lwp->tty->pgrp = lwp->__pgrp;
  1100. lwp->tty->session = lwp->session;
  1101. lwp->tty->foreground = lwp;
  1102. tcgetattr(1, &stdin_termios);
  1103. old_stdin_termios = stdin_termios;
  1104. stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
  1105. tcsetattr(1, 0, &stdin_termios);
  1106. }
  1107. else
  1108. {
  1109. if (self_lwp != RT_NULL)
  1110. {
  1111. rt_spin_lock(&self_lwp->tty->spinlock);
  1112. ret = tty_push(&self_lwp->tty->head, self_lwp);
  1113. rt_spin_unlock(&self_lwp->tty->spinlock);
  1114. if (ret < 0)
  1115. {
  1116. lwp_tid_put(tid);
  1117. lwp_ref_dec(lwp);
  1118. LOG_E("malloc fail!\n");
  1119. return -ENOMEM;
  1120. }
  1121. lwp->tty = self_lwp->tty;
  1122. lwp->tty->pgrp = lwp->__pgrp;
  1123. lwp->tty->session = lwp->session;
  1124. lwp->tty->foreground = lwp;
  1125. }
  1126. else
  1127. {
  1128. lwp->tty = RT_NULL;
  1129. }
  1130. }
  1131. }
  1132. thread->lwp = lwp;
  1133. #ifndef ARCH_MM_MMU
  1134. struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
  1135. thread->user_stack = app_head->stack_offset ?
  1136. (void *)(app_head->stack_offset -
  1137. app_head->data_offset +
  1138. (uint32_t)lwp->data_entry) : RT_NULL;
  1139. thread->user_stack_size = app_head->stack_size;
  1140. /* init data area */
  1141. rt_memset(lwp->data_entry, 0, lwp->data_size);
  1142. /* init user stack */
  1143. rt_memset(thread->user_stack, '#', thread->user_stack_size);
  1144. #endif /* not defined ARCH_MM_MMU */
  1145. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  1146. if (debug && rt_dbg_ops)
  1147. {
  1148. lwp->debug = debug;
  1149. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
  1150. }
  1151. rt_hw_interrupt_enable(level);
  1152. rt_thread_startup(thread);
  1153. return lwp_to_pid(lwp);
  1154. }
  1155. }
  1156. lwp_tid_put(tid);
  1157. lwp_ref_dec(lwp);
  1158. return -RT_ERROR;
  1159. }
  1160. #ifdef RT_USING_MUSL
  1161. extern char **__environ;
  1162. #else
  1163. char __environ = 0;
  1164. #endif
  1165. pid_t exec(char *filename, int debug, int argc, char **argv)
  1166. {
  1167. return lwp_execve(filename, debug, argc, argv, __environ);
  1168. }
  1169. #ifdef ARCH_MM_MMU
  1170. void lwp_user_setting_save(rt_thread_t thread)
  1171. {
  1172. if (thread)
  1173. {
  1174. thread->thread_idr = arch_get_tidr();
  1175. }
  1176. }
/**
 * Restore per-thread user-mode settings when switching in @p thread:
 * the thread-ID register and, when a debugger is present, the hardware
 * process-id register and single-step state.
 *
 * @param thread the thread being switched in; NULL is a no-op.
 */
void lwp_user_setting_restore(rt_thread_t thread)
{
    if (!thread)
    {
        return;
    }
#if !defined(ARCH_RISCV64)
    /* tidr will be set in RESTORE_ALL in risc-v */
    arch_set_tidr(thread->thread_idr);
#endif

    if (rt_dbg_ops)
    {
        struct rt_lwp *l = (struct rt_lwp *)thread->lwp;

        /* Tell the hardware which process is running; 0 for kernel-only
         * threads that have no lwp. */
        if (l != 0)
        {
            rt_hw_set_process_id((size_t)l->pid);
        }
        else
        {
            rt_hw_set_process_id(0);
        }
        if (l && l->debug)
        {
            uint32_t step_type = 0;

            step_type = dbg_step_type();
            /* NOTE(review): presumably step_type 2 means "always single-step"
             * and 1 means "step only when this thread requested it"
             * (thread->step_exec) — confirm against the dbg API. */
            if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
            {
                dbg_activate_step();
            }
            else
            {
                dbg_deactivate_step();
            }
        }
    }
}
  1213. #endif /* ARCH_MM_MMU */