lwp.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
  9. * 2018-11-02 heyuanjie fix complie error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. */
  13. #include <rthw.h>
  14. #include <rtthread.h>
  15. #include <dfs_posix.h>
  16. #include <lwp_elf.h>
  17. #ifndef RT_USING_DFS
  18. #error "lwp need file system(RT_USING_DFS)"
  19. #endif
  20. #include "lwp.h"
  21. #include "lwp_arch.h"
  22. #include "console.h"
  23. #define DBG_TAG "LWP"
  24. #define DBG_LVL DBG_WARNING
  25. #include <rtdbg.h>
  26. #ifdef RT_USING_USERSPACE
  27. #include <lwp_mm_area.h>
  28. #include <lwp_user_mm.h>
  29. #endif /* end of RT_USING_USERSPACE */
  30. static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
  31. #ifdef DFS_USING_WORKDIR
  32. extern char working_directory[];
  33. #endif
  34. static struct termios stdin_termios, old_stdin_termios;
  35. extern void lwp_user_entry(void *args, const void *text, void *ustack, void *k_stack);
  36. int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
  37. struct termios *get_old_termios(void)
  38. {
  39. return &old_stdin_termios;
  40. }
  41. void lwp_setcwd(char *buf)
  42. {
  43. struct rt_lwp *lwp = RT_NULL;
  44. if(strlen(buf) >= DFS_PATH_MAX)
  45. {
  46. rt_kprintf("buf too long!\n");
  47. return ;
  48. }
  49. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  50. if (lwp)
  51. {
  52. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
  53. }
  54. else
  55. {
  56. rt_strncpy(working_directory, buf, DFS_PATH_MAX);
  57. }
  58. return ;
  59. }
  60. char *lwp_getcwd(void)
  61. {
  62. char *dir_buf = RT_NULL;
  63. struct rt_lwp *lwp = RT_NULL;
  64. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  65. if (lwp)
  66. {
  67. if(lwp->working_directory[0] != '/')
  68. {
  69. dir_buf = &working_directory[0];
  70. }
  71. else
  72. {
  73. dir_buf = &lwp->working_directory[0];
  74. }
  75. }
  76. else
  77. dir_buf = &working_directory[0];
  78. return dir_buf;
  79. }
  80. /**
  81. * RT-Thread light-weight process
  82. */
  83. void lwp_set_kernel_sp(uint32_t *sp)
  84. {
  85. rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
  86. }
/**
 * Get the kernel-mode stack pointer of the current thread.
 *
 * With RT_USING_USERSPACE the thread's saved sp is used directly.
 * Otherwise, when a thread switch is pending inside an interrupt, the
 * kernel sp must be taken from the *outgoing* thread recorded in
 * rt_interrupt_from_thread rather than from rt_thread_self().
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef RT_USING_USERSPACE
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;

    if (rt_thread_switch_interrupt_flag)
    {
        /* rt_interrupt_from_thread holds the address of the outgoing
         * thread's sp field; recover its control block from that. */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
  106. #ifdef RT_USING_USERSPACE
/**
 * Copy argc/argv/envp plus the ELF auxiliary vector into a freshly
 * mapped user page at the top of the user address space.
 *
 * Page layout: argc, argv pointers, NULL, envp pointers, NULL, aux
 * items, NULL, followed by the string data the pointers refer to.
 * The page is written through its kernel alias (args_k/str_k) because
 * the user mapping is not visible from the kernel.
 *
 * @return kernel-visible pointer to the aux vector inside the page, or
 *         RT_NULL when the arguments exceed one page or mapping fails.
 *         On success lwp->args holds the user-visible page address.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;          /* user-space address of the string area */
    char *str_k;        /* kernel alias of str (same physical memory) */
    char **new_argve;
    int i;
    int len;
    size_t *args_k;     /* kernel alias of args */
    struct process_aux *aux;

    /* account for the argument strings and their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);

    /* account for the environment strings and their pointer slots;
     * i is left at the environment count for the layout math below */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }

    /* for aux */
    size += sizeof(struct process_aux);

    /* everything must fit into the single page mapped below */
    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }

    /* args = (int *)lwp_map_user(lwp, 0, size); */
    args = (int *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE), size, 0);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* translate to physical, then to the kernel linear mapping */
    args_k = (size_t *)rt_hw_mmu_v2p(&lwp->mmu_info, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);

    /* argc, argv[], 0, envp[], 0 , aux[] */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));

    new_argve = (char **)&args_k[1];
    args_k[0] = argc;

    /* copy argv strings; pointer slots hold the user-space addresses */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;   /* argv terminator */
    i++;
    new_argve[i] = 0;   /* envp terminator when envp is empty/NULL */

    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;
    }
    i++;

    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    /* make the freshly written page visible before user code runs */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);

    lwp->args = args;

    return aux;
}
  188. #else
/**
 * Copy argc/argv/envp (and, with ARCH_MM_MMU, the auxiliary vector)
 * into a single heap allocation for the non-userspace build.
 *
 * Buffer layout: argc, argv pointers, NULL, envp pointers, NULL,
 * (aux items,) followed by the string data.
 *
 * @return pointer to the aux area inside the buffer, or RT_NULL on
 *         allocation failure.  On success lwp->args holds the buffer.
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;

    /* account for the argument strings and their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);

    /* account for environment strings; i ends at the environment count */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }

#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);

    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */

    new_argve = (char **)&args[1];
    args[0] = argc;

    /* copy argv strings right after the pointer table */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;   /* argv terminator */
    i++;
    new_argve[i] = 0;   /* envp terminator when envp is empty/NULL */

    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;
    }
#ifdef ARCH_MM_MMU
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    lwp->args = args;

    return aux;
#else
    lwp->args = args;
    lwp->args_length = size;    /* kept so the buffer can be freed later */

    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
  275. #endif
  276. #ifdef ARCH_MM_MMU
/* Bail out of the enclosing loader function (which must define `result`
 * and an `_exit` label) when a file offset lies beyond the file length. */
#define check_off(voff, vlen) \
    do \
    { \
        if (voff > vlen) \
        { \
            result = -RT_ERROR; \
            goto _exit; \
        } \
    } while (0)

/* Bail out of the enclosing loader function when fewer bytes than
 * expected were actually read from the file. */
#define check_read(vrlen, vrlen_want) \
    do \
    { \
        if (vrlen < vrlen_want) \
        { \
            result = -RT_ERROR; \
            goto _exit; \
        } \
    } while (0)
  295. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  296. {
  297. size_t read_block = 0;
  298. while (nmemb)
  299. {
  300. size_t count;
  301. count = read(fd, ptr, size * nmemb) / size;
  302. if (count < nmemb)
  303. {
  304. LOG_E("ERROR: file size error!");
  305. break;
  306. }
  307. ptr = (void *)((uint8_t *)ptr + (count * size));
  308. nmemb -= count;
  309. read_block += count;
  310. }
  311. return read_block;
  312. }
/* ELF symbol table entry used while loading .dynsym for relocation.
 * NOTE(review): the field order matches the Elf32 symbol layout; the
 * Elf64 on-disk layout orders st_info/st_other/st_shndx before
 * st_value -- confirm this is handled for 64-bit targets. */
typedef struct
{
    Elf_Word st_name;        /* symbol name (string table offset) */
    Elf_Addr st_value;       /* symbol value */
    Elf_Word st_size;        /* symbol size */
    unsigned char st_info;   /* symbol type and binding */
    unsigned char st_other;  /* symbol visibility */
    Elf_Half st_shndx;       /* section header index */
} Elf_sym;
  322. #ifdef RT_USING_USERSPACE
  323. void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
  324. #else
  325. void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
  326. #endif
  327. #ifdef RT_USING_USERSPACE
  328. struct map_range
  329. {
  330. void *start;
  331. size_t size;
  332. };
  333. static void expand_map_range(struct map_range *m, void *start, size_t size)
  334. {
  335. if (!m->start)
  336. {
  337. m->start = start;
  338. m->size = size;
  339. }
  340. else
  341. {
  342. void *end = (void *)((char*)start + size);
  343. void *mend = (void *)((char*)m->start + m->size);
  344. if (m->start > start)
  345. {
  346. m->start = start;
  347. }
  348. if (mend < end)
  349. {
  350. mend = end;
  351. }
  352. m->size = (char *)mend - (char *)m->start;
  353. }
  354. }
  355. static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
  356. {
  357. void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
  358. void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  359. void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
  360. void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  361. if (m1->size)
  362. {
  363. if (m1_start < (void *)USER_LOAD_VADDR)
  364. {
  365. return -1;
  366. }
  367. if (m1_start > (void *)USER_STACK_VSTART)
  368. {
  369. return -1;
  370. }
  371. if (m1_end < (void *)USER_LOAD_VADDR)
  372. {
  373. return -1;
  374. }
  375. if (m1_end > (void *)USER_STACK_VSTART)
  376. {
  377. return -1;
  378. }
  379. }
  380. if (m2->size)
  381. {
  382. if (m2_start < (void *)USER_LOAD_VADDR)
  383. {
  384. return -1;
  385. }
  386. if (m2_start > (void *)USER_STACK_VSTART)
  387. {
  388. return -1;
  389. }
  390. if (m2_end < (void *)USER_LOAD_VADDR)
  391. {
  392. return -1;
  393. }
  394. if (m2_end > (void *)USER_STACK_VSTART)
  395. {
  396. return -1;
  397. }
  398. }
  399. if ((m1->size != 0) && (m2->size != 0))
  400. {
  401. if (m1_start < m2_start)
  402. {
  403. if (m1_end > m2_start)
  404. {
  405. return -1;
  406. }
  407. }
  408. else /* m2_start <= m1_start */
  409. {
  410. if (m2_end > m1_start)
  411. {
  412. return -1;
  413. }
  414. }
  415. }
  416. return 0;
  417. }
  418. #endif
/**
 * Load an ELF image from an open file into the lwp address space.
 *
 * Steps: validate magic/class/version/type; with RT_USING_USERSPACE
 * detect PT_DYNAMIC images (delegated to ld.so); fill the auxiliary
 * vector (AT_PAGESZ/AT_RANDOM/AT_PHDR/AT_PHNUM/AT_PHENT); size and map
 * (or allocate) the text/data areas; copy PT_LOAD segments and zero the
 * bss tail; finally apply relocations for ET_DYN images.
 *
 * @param fd        open file descriptor of the ELF file
 * @param len       total file length in bytes
 * @param lwp       target process control block
 * @param load_addr fixed load address, or NULL for dynamic placement
 * @param aux       auxiliary vector to fill during loading
 *
 * @return RT_EOK on success, 1 when the image needs the dynamic loader,
 *         a negative error code otherwise.
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef RT_USING_USERSPACE
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;
    rt_mmu_info *m_info = &lwp->mmu_info;
#endif

    if (len < sizeof eheader)
    {
        LOG_E("len < sizeof eheader!");
        return -RT_ERROR;
    }

    /* check the ELF magic number */
    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        LOG_E("elf_magic not same, magic:0x%x!", magic);
        return -RT_ERROR;
    }

    /* read the full ELF header */
    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);

    /* e_ident[4] is EI_CLASS: 1 = 32-bit, 2 = 64-bit */
#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#endif

    /* e_ident[6] is EI_VERSION */
    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        LOG_E("elf Version not 1,ver:%d!", eheader.e_ident[6]);
        return -RT_ERROR;
    }

    if ((eheader.e_type != ET_DYN)
#ifdef RT_USING_USERSPACE
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
        return -RT_ERROR;
    }

#ifdef RT_USING_USERSPACE
    {
        /* a PT_DYNAMIC segment means ld.so must load this image */
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);

            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif

    /* a non-zero entry must be one of the two fixed load addresses */
    if (eheader.e_entry != 0)
    {
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalidate */
            LOG_E("elf entry is invalidate, entry:0x%x!", eheader.e_entry);
            return -RT_ERROR;
        }
    }

    { /* load aux */
        uint8_t *process_header;
        size_t process_header_size;

        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef RT_USING_USERSPACE
        /* the headers plus the 16-byte AT_RANDOM area share one page */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            LOG_E("process_header_size too big, size:0x%x!", process_header_size);
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            LOG_E("lwp map user failed!");
            return -RT_ERROR;
        }
        /* write through the kernel alias of the user page */
        pa = rt_hw_mmu_v2p(m_info, va);
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef RT_USING_USERSPACE
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif

        aux->item[1].key = AT_PAGESZ;
#ifdef RT_USING_USERSPACE
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif
        aux->item[2].key = AT_RANDOM;
        {
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef RT_USING_USERSPACE
            uint8_t *krandom;

            /* NOTE(review): only 4 bytes are written into the 16-byte
             * AT_RANDOM area, and the value is just the tick counter --
             * confirm consumers tolerate this */
            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)rt_hw_mmu_v2p(m_info, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }
        aux->item[3].key = AT_PHDR;
#ifdef RT_USING_USERSPACE
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef RT_USING_USERSPACE
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }

    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef RT_USING_USERSPACE
    else
    {
        /* map user: walk section headers to size the text/data areas */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                /* read-only PROGBITS -> text, writable -> data */
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }

        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }

        if (user_area[0].start == NULL)
        {
            /* DYN: shift both areas to the standard load address */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }

        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }

        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                /* last argument marks the text area (index 0) */
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (int)(i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;

        /* find the lowest/highest addresses of ALLOC sections */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }

        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);

    /* copy PT_LOAD segments into memory */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);

        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
                return -RT_ERROR;
            }

            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef RT_USING_USERSPACE
            {
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;

                /* user pages are not kernel-mapped: copy page by page
                 * through the kernel alias of each physical page.
                 * NOTE(review): this path offsets by load_addr while the
                 * zero-fill path below uses load_off -- confirm both
                 * are intended. */
                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                while (size)
                {
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);

            /* zero-fill the bss tail (memsz beyond filesz) */
            if (pheader.p_filesz < pheader.p_memsz)
            {
#ifdef RT_USING_USERSPACE
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;

                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }

    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);

        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }
        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);

        /* locate .got, .rel.dyn and .dynsym by section name */
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }
        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef RT_USING_USERSPACE
        lwp_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        lwp_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);

        /* make the patched text visible to the instruction stream */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);

_exit:
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  859. #endif /* ARCH_MM_MMU */
  860. RT_WEAK int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  861. {
  862. uint8_t *ptr;
  863. int ret = -1;
  864. int len;
  865. int fd = -1;
  866. /* check file name */
  867. RT_ASSERT(filename != RT_NULL);
  868. /* check lwp control block */
  869. RT_ASSERT(lwp != RT_NULL);
  870. /* copy file name to process name */
  871. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  872. if (load_addr != RT_NULL)
  873. {
  874. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  875. ptr = load_addr;
  876. }
  877. else
  878. {
  879. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  880. ptr = RT_NULL;
  881. }
  882. fd = open(filename, O_BINARY | O_RDONLY, 0);
  883. if (fd < 0)
  884. {
  885. LOG_E("ERROR: Can't open elf file %s!", filename);
  886. goto out;
  887. }
  888. len = lseek(fd, 0, SEEK_END);
  889. if (len < 0)
  890. {
  891. LOG_E("ERROR: File %s size error!", filename);
  892. goto out;
  893. }
  894. lseek(fd, 0, SEEK_SET);
  895. ret = load_elf(fd, len, lwp, ptr, aux);
  896. if ((ret != RT_EOK) && (ret != 1))
  897. {
  898. LOG_E("lwp load ret = %d", ret);
  899. }
  900. out:
  901. if (fd > 0)
  902. {
  903. close(fd);
  904. }
  905. return ret;
  906. }
/**
 * Thread cleanup hook for lwp threads (installed as tid->cleanup):
 * detaches the thread from its process, releases its tid and drops one
 * lwp reference.
 */
void lwp_cleanup(struct rt_thread *tid)
{
    rt_base_t level;
    struct rt_lwp *lwp;
    struct tty_node *tty_head = RT_NULL;

    if (tid == NULL)
    {
        return;
    }
    LOG_I("cleanup thread: %s, stack_addr: %08X", tid->name, tid->stack_addr);

    /* unlink the thread from its process under interrupt lock */
    level = rt_hw_interrupt_disable();
    lwp = (struct rt_lwp *)tid->lwp;
    lwp_tid_put(tid->tid);
    rt_list_remove(&tid->sibling);
    rt_hw_interrupt_enable(level);

    if (lwp->tty != RT_NULL)
    {
        tty_head = lwp->tty->head;
    }
    /* drop this thread's reference; when it was the last one, also
     * detach the process from its controlling tty */
    if (!lwp_ref_dec(lwp))
    {
        if (tty_head)
        {
            tty_pop(&tty_head, lwp);
        }
    }
    return;
}
  935. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  936. {
  937. struct dfs_fd *d;
  938. struct dfs_fdtable *lwp_fdt;
  939. lwp_fdt = &lwp->fdt;
  940. /* init 4 fds */
  941. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  942. if (lwp_fdt->fds)
  943. {
  944. lwp_fdt->maxfd = 4;
  945. d = fd_get(0);
  946. fd_associate(lwp_fdt, 0, d);
  947. d = fd_get(1);
  948. fd_associate(lwp_fdt, 1, d);
  949. d = fd_get(2);
  950. fd_associate(lwp_fdt, 2, d);
  951. }
  952. return;
  953. }
/* Kernel-side entry of a newly created lwp thread: installs the cleanup
 * hook, optionally plants a debugger breakpoint at the process entry
 * point, then transfers control to user space (does not return). */
static void lwp_thread_entry(void *parameter)
{
rt_thread_t tid;
struct rt_lwp *lwp;
tid = rt_thread_self();
lwp = (struct rt_lwp *)tid->lwp;
tid->cleanup = lwp_cleanup;
tid->user_stack = RT_NULL;
if (lwp->debug)
{
/* save the first instruction, then overwrite it with the debug
 * breakpoint instruction so the debugger gains control on entry */
lwp->bak_first_ins = *(uint32_t *)lwp->text_entry;
*(uint32_t *)lwp->text_entry = dbg_get_ins();
/* flush dcache and invalidate icache so the CPU fetches the
 * patched instruction, not a stale cached copy */
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
icache_invalid_all();
}
#ifdef ARCH_MM_MMU
/* MMU build: user stack sits at the fixed top of the user VA space */
lwp_user_entry(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
#else
/* no-MMU build: stack is carved out of the process data region */
lwp_user_entry(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
  975. struct rt_lwp *lwp_self(void)
  976. {
  977. rt_thread_t tid;
  978. tid = rt_thread_self();
  979. if (tid)
  980. {
  981. return (struct rt_lwp *)tid->lwp;
  982. }
  983. return RT_NULL;
  984. }
  985. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  986. {
  987. int result;
  988. rt_base_t level;
  989. struct rt_lwp *lwp;
  990. char *thread_name;
  991. char *argv_last = argv[argc - 1];
  992. int bg = 0;
  993. struct process_aux *aux;
  994. int tid = 0;
  995. int ret;
  996. if (filename == RT_NULL)
  997. {
  998. return -RT_ERROR;
  999. }
  1000. lwp = lwp_new();
  1001. if (lwp == RT_NULL)
  1002. {
  1003. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  1004. return -RT_ENOMEM;
  1005. }
  1006. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  1007. if ((tid = lwp_tid_get()) == 0)
  1008. {
  1009. lwp_ref_dec(lwp);
  1010. return -ENOMEM;
  1011. }
  1012. #ifdef RT_USING_USERSPACE
  1013. if (lwp_user_space_init(lwp) != 0)
  1014. {
  1015. lwp_tid_put(tid);
  1016. lwp_ref_dec(lwp);
  1017. return -ENOMEM;
  1018. }
  1019. #endif
  1020. if (argv_last[0] == '&' && argv_last[1] == '\0')
  1021. {
  1022. argc--;
  1023. bg = 1;
  1024. }
  1025. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  1026. {
  1027. lwp_tid_put(tid);
  1028. lwp_ref_dec(lwp);
  1029. return -ENOMEM;
  1030. }
  1031. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  1032. #ifdef ARCH_MM_MMU
  1033. if (result == 1)
  1034. {
  1035. /* dynmaic */
  1036. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  1037. result = load_ldso(lwp, filename, argv, envp);
  1038. }
  1039. #endif /* ARCH_MM_MMU */
  1040. if (result == RT_EOK)
  1041. {
  1042. rt_thread_t thread = RT_NULL;
  1043. rt_uint32_t priority = 25, tick = 200;
  1044. lwp_copy_stdio_fdt(lwp);
  1045. /* obtain the base name */
  1046. thread_name = strrchr(filename, '/');
  1047. thread_name = thread_name ? thread_name + 1 : filename;
  1048. #ifndef ARCH_MM_MMU
  1049. struct lwp_app_head *app_head = lwp->text_entry;
  1050. if (app_head->priority)
  1051. {
  1052. priority = app_head->priority;
  1053. }
  1054. if (app_head->tick)
  1055. {
  1056. tick = app_head->tick;
  1057. }
  1058. #endif /* not defined ARCH_MM_MMU */
  1059. thread = rt_thread_create(thread_name, lwp_thread_entry, RT_NULL,
  1060. LWP_TASK_STACK_SIZE, priority, tick);
  1061. if (thread != RT_NULL)
  1062. {
  1063. struct rt_lwp *self_lwp;
  1064. thread->tid = tid;
  1065. lwp_tid_set_thread(tid, thread);
  1066. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
  1067. (rt_size_t)thread->stack_addr + thread->stack_size);
  1068. level = rt_hw_interrupt_disable();
  1069. self_lwp = lwp_self();
  1070. if (self_lwp)
  1071. {
  1072. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1073. lwp->__pgrp = tid;
  1074. lwp->session = self_lwp->session;
  1075. /* lwp add to children link */
  1076. lwp->sibling = self_lwp->first_child;
  1077. self_lwp->first_child = lwp;
  1078. lwp->parent = self_lwp;
  1079. }
  1080. else
  1081. {
  1082. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1083. lwp->__pgrp = tid;
  1084. }
  1085. if (!bg)
  1086. {
  1087. if (lwp->session == -1)
  1088. {
  1089. struct tty_struct *tty = RT_NULL;
  1090. struct rt_lwp *old_lwp;
  1091. tty = (struct tty_struct *)console_tty_get();
  1092. old_lwp = tty->foreground;
  1093. rt_spin_lock(&tty->spinlock);
  1094. ret = tty_push(&tty->head, old_lwp);
  1095. rt_spin_unlock(&tty->spinlock);
  1096. if (ret < 0)
  1097. {
  1098. lwp_tid_put(tid);
  1099. lwp_ref_dec(lwp);
  1100. LOG_E("malloc fail!\n");
  1101. return -ENOMEM;
  1102. }
  1103. lwp->tty = tty;
  1104. lwp->tty->pgrp = lwp->__pgrp;
  1105. lwp->tty->session = lwp->session;
  1106. lwp->tty->foreground = lwp;
  1107. tcgetattr(1, &stdin_termios);
  1108. old_stdin_termios = stdin_termios;
  1109. stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
  1110. tcsetattr(1, 0, &stdin_termios);
  1111. }
  1112. else
  1113. {
  1114. if (self_lwp != RT_NULL)
  1115. {
  1116. rt_spin_lock(&self_lwp->tty->spinlock);
  1117. ret = tty_push(&self_lwp->tty->head, self_lwp);
  1118. rt_spin_unlock(&self_lwp->tty->spinlock);
  1119. if (ret < 0)
  1120. {
  1121. lwp_tid_put(tid);
  1122. lwp_ref_dec(lwp);
  1123. LOG_E("malloc fail!\n");
  1124. return -ENOMEM;
  1125. }
  1126. lwp->tty = self_lwp->tty;
  1127. lwp->tty->pgrp = lwp->__pgrp;
  1128. lwp->tty->session = lwp->session;
  1129. lwp->tty->foreground = lwp;
  1130. }
  1131. else
  1132. {
  1133. lwp->tty = RT_NULL;
  1134. }
  1135. }
  1136. }
  1137. thread->lwp = lwp;
  1138. #ifndef ARCH_MM_MMU
  1139. struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
  1140. thread->user_stack = app_head->stack_offset ?
  1141. (void *)(app_head->stack_offset -
  1142. app_head->data_offset +
  1143. (uint32_t)lwp->data_entry) : RT_NULL;
  1144. thread->user_stack_size = app_head->stack_size;
  1145. /* init data area */
  1146. rt_memset(lwp->data_entry, 0, lwp->data_size);
  1147. /* init user stack */
  1148. rt_memset(thread->user_stack, '#', thread->user_stack_size);
  1149. #endif /* not defined ARCH_MM_MMU */
  1150. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  1151. if (debug && rt_dbg_ops)
  1152. {
  1153. lwp->debug = debug;
  1154. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
  1155. }
  1156. rt_hw_interrupt_enable(level);
  1157. rt_thread_startup(thread);
  1158. return lwp_to_pid(lwp);
  1159. }
  1160. }
  1161. lwp_tid_put(tid);
  1162. lwp_ref_dec(lwp);
  1163. return -RT_ERROR;
  1164. }
  1165. #ifdef RT_USING_MUSL
  1166. extern char **__environ;
  1167. #else
  1168. char __environ = 0;
  1169. #endif
/* Convenience wrapper: execute a program, supplying the caller's current
 * environment (__environ) as the new process's environment vector. */
pid_t exec(char *filename, int debug, int argc, char **argv)
{
return lwp_execve(filename, debug, argc, argv, __environ);
}
  1174. #ifdef ARCH_MM_MMU
  1175. void lwp_user_setting_save(rt_thread_t thread)
  1176. {
  1177. if (thread)
  1178. {
  1179. thread->thread_idr = rt_cpu_get_thread_idr();
  1180. }
  1181. }
  1182. void lwp_user_setting_restore(rt_thread_t thread)
  1183. {
  1184. if (!thread)
  1185. {
  1186. return;
  1187. }
  1188. rt_cpu_set_thread_idr(thread->thread_idr);
  1189. if (rt_dbg_ops)
  1190. {
  1191. struct rt_lwp *l = (struct rt_lwp *)thread->lwp;
  1192. if (l != 0)
  1193. {
  1194. rt_hw_set_process_id((size_t)l->pid);
  1195. }
  1196. else
  1197. {
  1198. rt_hw_set_process_id(0);
  1199. }
  1200. if (l && l->debug)
  1201. {
  1202. uint32_t step_type = 0;
  1203. step_type = dbg_step_type();
  1204. if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
  1205. {
  1206. dbg_activate_step();
  1207. }
  1208. else
  1209. {
  1210. dbg_deactivate_step();
  1211. }
  1212. }
  1213. }
  1214. }
  1215. #endif /* ARCH_MM_MMU */