lwp.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
  9. * 2018-11-02 heyuanjie fix complie error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. */
  13. #include <rthw.h>
  14. #include <rtthread.h>
  15. #include <dfs_file.h>
  16. #include <unistd.h>
  17. #include <stdio.h> /* rename() */
  18. #include <fcntl.h>
  19. #include <sys/stat.h>
  20. #include <sys/statfs.h> /* statfs() */
  21. #include <lwp_elf.h>
  22. #ifndef RT_USING_DFS
  23. #error "lwp need file system(RT_USING_DFS)"
  24. #endif
  25. #include "lwp.h"
  26. #include "lwp_arch.h"
  27. #include "lwp_arch_comm.h"
  28. #include "console.h"
  29. #define DBG_TAG "LWP"
  30. #define DBG_LVL DBG_WARNING
  31. #include <rtdbg.h>
  32. #ifdef ARCH_MM_MMU
  33. #include <lwp_user_mm.h>
  34. #endif /* end of ARCH_MM_MMU */
  35. #ifndef O_DIRECTORY
  36. #define O_DIRECTORY 0x200000
  37. #endif
  38. #ifndef O_BINARY
  39. #define O_BINARY 0x10000
  40. #endif
  41. static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
  42. #ifdef DFS_USING_WORKDIR
  43. extern char working_directory[];
  44. #endif
  45. static struct termios stdin_termios, old_stdin_termios;
  46. int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
  47. struct termios *get_old_termios(void)
  48. {
  49. return &old_stdin_termios;
  50. }
  51. void lwp_setcwd(char *buf)
  52. {
  53. struct rt_lwp *lwp = RT_NULL;
  54. if(strlen(buf) >= DFS_PATH_MAX)
  55. {
  56. rt_kprintf("buf too long!\n");
  57. return ;
  58. }
  59. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  60. if (lwp)
  61. {
  62. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
  63. }
  64. else
  65. {
  66. rt_strncpy(working_directory, buf, DFS_PATH_MAX);
  67. }
  68. return ;
  69. }
  70. char *lwp_getcwd(void)
  71. {
  72. char *dir_buf = RT_NULL;
  73. struct rt_lwp *lwp = RT_NULL;
  74. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  75. if (lwp)
  76. {
  77. if(lwp->working_directory[0] != '/')
  78. {
  79. dir_buf = &working_directory[0];
  80. }
  81. else
  82. {
  83. dir_buf = &lwp->working_directory[0];
  84. }
  85. }
  86. else
  87. dir_buf = &working_directory[0];
  88. return dir_buf;
  89. }
  90. /**
  91. * RT-Thread light-weight process
  92. */
  93. void lwp_set_kernel_sp(uint32_t *sp)
  94. {
  95. rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
  96. }
/*
 * Return the kernel-mode stack pointer for the current context.
 *
 * MMU builds: the thread's saved sp is the kernel stack pointer.
 * Non-MMU builds: if a context switch is pending (switch-interrupt flag
 * set), the thread being switched from is recovered from
 * rt_interrupt_from_thread (which holds the address of its sp field) via
 * rt_container_of; otherwise the current thread's kernel_sp is used.
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef ARCH_MM_MMU
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;
    if (rt_thread_switch_interrupt_flag)
    {
        /* a switch is in flight: rt_interrupt_from_thread points at the
         * outgoing thread's sp member, so back up to the thread object */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
  116. #ifdef ARCH_MM_MMU
/*
 * Copy argc/argv/envp into a page mapped at the top of the new process's
 * user address space and reserve room for the ELF auxiliary vector.
 *
 * The page is written through its kernel-visible alias (lwp_v2p() +
 * PV_OFFSET) because the target address space is not active yet, while
 * the pointers stored into the argv/envp arrays are the USER virtual
 * addresses the process will see.
 *
 * Returns a kernel-visible pointer to the aux vector inside that page,
 * or RT_NULL if everything does not fit in one page / mapping fails.
 * On success lwp->args is set to the user virtual address of the block.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;      /* user VA cursor for string storage */
    char *str_k;    /* kernel alias cursor for the same storage */
    char **new_argve;
    int i;
    int len;
    size_t *args_k; /* kernel alias of the args block */
    struct process_aux *aux;

    /* account for every argv string (incl. NUL) and its pointer slot */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);
    /* account for every envp string and pointer slot; i keeps the count */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }
    /* for aux */
    size += sizeof(struct process_aux);
    /* the whole block must fit in the single page mapped below */
    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }
    /* args = (int *)lwp_map_user(lwp, 0, size); */
    args = (int *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE), size, 0);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    /* derive the kernel-visible alias of the freshly mapped user page */
    args_k = (size_t *)lwp_v2p(lwp, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);
    /* argc, argv[], 0, envp[], 0 , aux[] */
    /* strings live right after the pointer table; i is the envp count */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    new_argve = (char **)&args_k[1];
    args_k[0] = argc;
    /* copy argv strings; store user VAs, write through the kernel alias */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0; /* argv terminator */
    i++;
    new_argve[i] = 0; /* envp terminator when envp is empty/absent */
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0; /* envp terminator */
    }
    i++;
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0; /* aux vector terminator slot */
    /* make the writes visible before the new process runs */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);
    lwp->args = args;
    return aux;
}
  198. #else
/*
 * Non-paged variant of lwp_argscopy(): copy argc/argv/envp into a heap
 * block shared directly with the new process (no separate user mapping).
 *
 * Layout: argc, argv pointers, NULL, envp pointers, NULL, [aux], strings.
 * Returns a pointer to the aux area inside the block (only populated as
 * a real aux vector when ARCH_MM_MMU is set), or RT_NULL on OOM.
 * On success lwp->args (and lwp->args_length in the non-MMU case, so the
 * block can be freed later) are updated.
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;

    /* account for argv strings (incl. NUL) and their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);
    /* account for envp strings and pointer slots; i keeps the count */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }
#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    /* argc, argv[], 0, envp[], 0 */
    /* strings start right after the pointer table (i = envp count) */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */
    new_argve = (char **)&args[1];
    args[0] = argc;
    /* copy argv strings and record their addresses */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0; /* argv terminator */
    i++;
    new_argve[i] = 0; /* envp terminator when envp is empty/absent */
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0; /* envp terminator */
    }
#ifdef ARCH_MM_MMU
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0; /* aux vector terminator slot */
    lwp->args = args;
    return aux;
#else
    lwp->args = args;
    lwp->args_length = size; /* remembered so the block can be freed */
    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
  285. #endif
  286. #ifdef ARCH_MM_MMU
/*
 * Abort load_elf() when a file offset lies beyond the file length.
 * NOTE: both macros below expect a local `int result` and an `_exit`
 * cleanup label to be in scope at the expansion site (they are only
 * used inside load_elf()).
 */
#define check_off(voff, vlen) \
do \
{ \
    if (voff > vlen) \
    { \
        result = -RT_ERROR; \
        goto _exit; \
    } \
} while (0)

/* Abort load_elf() when fewer bytes were read than requested
 * (truncated or unreadable ELF image). */
#define check_read(vrlen, vrlen_want) \
do \
{ \
    if (vrlen < vrlen_want) \
    { \
        result = -RT_ERROR; \
        goto _exit; \
    } \
} while (0)
  305. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  306. {
  307. size_t read_block = 0;
  308. while (nmemb)
  309. {
  310. size_t count;
  311. count = read(fd, ptr, size * nmemb) / size;
  312. if (count < nmemb)
  313. {
  314. LOG_E("ERROR: file size error!");
  315. break;
  316. }
  317. ptr = (void *)((uint8_t *)ptr + (count * size));
  318. nmemb -= count;
  319. read_block += count;
  320. }
  321. return read_block;
  322. }
/* ELF symbol-table entry used to read the .dynsym section.
 * NOTE(review): the field order matches the Elf32_Sym layout; confirm
 * that the Elf_Word/Elf_Addr/Elf_Half typedefs from lwp_elf.h keep this
 * consistent on 64-bit targets (Elf64_Sym orders its fields differently). */
typedef struct
{
    Elf_Word st_name;       /* symbol name: offset into string table */
    Elf_Addr st_value;      /* symbol value (address) */
    Elf_Word st_size;       /* size of the symbol */
    unsigned char st_info;  /* type and binding attributes */
    unsigned char st_other; /* visibility */
    Elf_Half st_shndx;      /* index of the section the symbol is in */
} Elf_sym;
  332. #ifdef ARCH_MM_MMU
  333. struct map_range
  334. {
  335. void *start;
  336. size_t size;
  337. };
  338. static void expand_map_range(struct map_range *m, void *start, size_t size)
  339. {
  340. if (!m->start)
  341. {
  342. m->start = start;
  343. m->size = size;
  344. }
  345. else
  346. {
  347. void *end = (void *)((char*)start + size);
  348. void *mend = (void *)((char*)m->start + m->size);
  349. if (m->start > start)
  350. {
  351. m->start = start;
  352. }
  353. if (mend < end)
  354. {
  355. mend = end;
  356. }
  357. m->size = (char *)mend - (char *)m->start;
  358. }
  359. }
  360. static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
  361. {
  362. void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
  363. void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  364. void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
  365. void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  366. if (m1->size)
  367. {
  368. if (m1_start < (void *)USER_LOAD_VADDR)
  369. {
  370. return -1;
  371. }
  372. if (m1_start > (void *)USER_STACK_VSTART)
  373. {
  374. return -1;
  375. }
  376. if (m1_end < (void *)USER_LOAD_VADDR)
  377. {
  378. return -1;
  379. }
  380. if (m1_end > (void *)USER_STACK_VSTART)
  381. {
  382. return -1;
  383. }
  384. }
  385. if (m2->size)
  386. {
  387. if (m2_start < (void *)USER_LOAD_VADDR)
  388. {
  389. return -1;
  390. }
  391. if (m2_start > (void *)USER_STACK_VSTART)
  392. {
  393. return -1;
  394. }
  395. if (m2_end < (void *)USER_LOAD_VADDR)
  396. {
  397. return -1;
  398. }
  399. if (m2_end > (void *)USER_STACK_VSTART)
  400. {
  401. return -1;
  402. }
  403. }
  404. if ((m1->size != 0) && (m2->size != 0))
  405. {
  406. if (m1_start < m2_start)
  407. {
  408. if (m1_end > m2_start)
  409. {
  410. return -1;
  411. }
  412. }
  413. else /* m2_start <= m1_start */
  414. {
  415. if (m2_end > m1_start)
  416. {
  417. return -1;
  418. }
  419. }
  420. }
  421. return 0;
  422. }
  423. #endif
/*
 * Load an ELF image from `fd` (of `len` bytes) into `lwp`.
 *
 * load_addr != NULL selects fixed-address loading (load_off = load_addr);
 * otherwise the image's ALLOC sections decide where to map (MMU builds)
 * or a heap block is allocated (non-MMU builds). Fills in the aux vector
 * entries AT_PAGESZ/AT_RANDOM/AT_PHDR/AT_PHNUM/AT_PHENT and applies
 * relocations for ET_DYN images.
 *
 * Returns RT_EOK on success, 1 when the image needs the dynamic loader
 * (has a PT_DYNAMIC segment; MMU builds only), or a negative error code.
 * NOTE: check_off()/check_read() jump to _exit with result = -RT_ERROR.
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef ARCH_MM_MMU
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self; /* kernel-visible alias of a user page */
#endif
    if (len < sizeof eheader)
    {
        LOG_E("len < sizeof eheader!");
        return -RT_ERROR;
    }
    /* verify the ELF magic before reading the full header */
    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        LOG_E("elf_magic not same, magic:0x%x!", magic);
        return -RT_ERROR;
    }
    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);
    /* e_ident[4] is EI_CLASS: 1 = 32-bit, 2 = 64-bit; must match build */
#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#endif
    /* e_ident[6] is EI_VERSION */
    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        LOG_E("elf Version not 1,ver:%d!", eheader.e_ident[6]);
        return -RT_ERROR;
    }
    if ((eheader.e_type != ET_DYN)
#ifdef ARCH_MM_MMU
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
        return -RT_ERROR;
    }
#ifdef ARCH_MM_MMU
    {
        /* a PT_DYNAMIC segment means the image needs ld.so; tell caller */
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);
            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif
    /* only the fixed app/ldso entry addresses are accepted */
    if (eheader.e_entry != 0)
    {
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalidate */
            LOG_E("elf entry is invalidate, entry:0x%x!", eheader.e_entry);
            return -RT_ERROR;
        }
    }
    { /* load aux */
        uint8_t *process_header;
        size_t process_header_size;
        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef ARCH_MM_MMU
        /* the program headers plus 16 random bytes share one user page */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            LOG_E("process_header_size too big, size:0x%x!", process_header_size);
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            LOG_E("lwp map user failed!");
            return -RT_ERROR;
        }
        /* write through the kernel-visible alias of the user page */
        pa = lwp_v2p(lwp, va);
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif
        aux->item[1].key = AT_PAGESZ;
#ifdef ARCH_MM_MMU
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif
        /* AT_RANDOM: 16 bytes the libc may use for stack guards etc.;
         * seeded here from the tick counter */
        aux->item[2].key = AT_RANDOM;
        {
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef ARCH_MM_MMU
            uint8_t *krandom;
            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)lwp_v2p(lwp, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }
        aux->item[3].key = AT_PHDR;
#ifdef ARCH_MM_MMU
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }
    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef ARCH_MM_MMU
    else
    {
        /* map user */
        /* walk the section headers and accumulate the text (read-only
         * PROGBITS) and data (everything else ALLOC) address ranges */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }
        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }
        if (user_area[0].start == NULL)
        {
            /* DYN */
            /* position-independent image: rebase both ranges to the
             * default load address */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }
        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }
        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        /* no MMU: allocate one heap block covering all ALLOC sections */
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }
        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);
    /* copy PT_LOAD segments into place and zero the .bss tails */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);
        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
                return -RT_ERROR;
            }
            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef ARCH_MM_MMU
            {
                /* copy page by page through the kernel alias of each
                 * user page, flushing the dcache as we go */
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;
                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                while (size)
                {
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);
            if (pheader.p_filesz < pheader.p_memsz)
            {
                /* zero-fill the region beyond p_filesz (.bss) */
#ifdef ARCH_MM_MMU
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;
                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0; /* only the first page starts mid-page */
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }
    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);
        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }
        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);
        /* locate .got, .rel.dyn and .dynsym by name */
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }
        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef ARCH_MM_MMU
        arch_elf_reloc(lwp->aspace, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
        /* make the patched text visible to instruction fetch */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);
_exit:
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  863. #endif /* ARCH_MM_MMU */
  864. rt_weak int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  865. {
  866. uint8_t *ptr;
  867. int ret = -1;
  868. int len;
  869. int fd = -1;
  870. /* check file name */
  871. RT_ASSERT(filename != RT_NULL);
  872. /* check lwp control block */
  873. RT_ASSERT(lwp != RT_NULL);
  874. /* copy file name to process name */
  875. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  876. if (load_addr != RT_NULL)
  877. {
  878. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  879. ptr = load_addr;
  880. }
  881. else
  882. {
  883. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  884. ptr = RT_NULL;
  885. }
  886. fd = open(filename, O_BINARY | O_RDONLY, 0);
  887. if (fd < 0)
  888. {
  889. LOG_E("ERROR: Can't open elf file %s!", filename);
  890. goto out;
  891. }
  892. len = lseek(fd, 0, SEEK_END);
  893. if (len < 0)
  894. {
  895. LOG_E("ERROR: File %s size error!", filename);
  896. goto out;
  897. }
  898. lseek(fd, 0, SEEK_SET);
  899. ret = load_elf(fd, len, lwp, ptr, aux);
  900. if ((ret != RT_EOK) && (ret != 1))
  901. {
  902. LOG_E("lwp load ret = %d", ret);
  903. }
  904. out:
  905. if (fd > 0)
  906. {
  907. close(fd);
  908. }
  909. return ret;
  910. }
/*
 * Thread cleanup hook for LWP threads (installed as tid->cleanup).
 *
 * Releases the thread's tid, unlinks it from its process's thread list,
 * then drops one reference on the process; when that was the last
 * reference, the process is detached from its controlling tty.
 */
void lwp_cleanup(struct rt_thread *tid)
{
    rt_base_t level;
    struct rt_lwp *lwp;
    struct tty_node *tty_head = RT_NULL;

    if (tid == NULL)
    {
        return;
    }
    LOG_I("cleanup thread: %s, stack_addr: %08X", tid->parent.name, tid->stack_addr);

    /* tid release and list removal must be atomic w.r.t. the scheduler */
    level = rt_hw_interrupt_disable();
    lwp = (struct rt_lwp *)tid->lwp;
    lwp_tid_put(tid->tid);
    rt_list_remove(&tid->sibling);
    rt_hw_interrupt_enable(level);

    /* remember the tty list head before the lwp may be freed below */
    if (lwp->tty != RT_NULL)
    {
        tty_head = lwp->tty->head;
    }
    if (!lwp_ref_dec(lwp))
    {
        /* last reference dropped: detach the process from its tty */
        if (tty_head)
        {
            tty_pop(&tty_head, lwp);
        }
    }
    return;
}
  939. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  940. {
  941. struct dfs_fd *d;
  942. struct dfs_fdtable *lwp_fdt;
  943. lwp_fdt = &lwp->fdt;
  944. /* init 4 fds */
  945. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  946. if (lwp_fdt->fds)
  947. {
  948. lwp_fdt->maxfd = 4;
  949. d = fd_get(0);
  950. fd_associate(lwp_fdt, 0, d);
  951. d = fd_get(1);
  952. fd_associate(lwp_fdt, 1, d);
  953. d = fd_get(2);
  954. fd_associate(lwp_fdt, 2, d);
  955. }
  956. return;
  957. }
/* Kernel-side entry of every lwp main thread: installs the cleanup hook,
 * optionally plants a debug breakpoint at the process entry point, then
 * drops to user mode via arch_start_umode() — this call never returns. */
static void _lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

    if (lwp->debug)
    {
        /* save the original first instruction, then overwrite it with the
         * debugger's breakpoint instruction so the debugger traps at entry */
        lwp->bak_first_ins = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        /* write the patched word back to memory and drop stale icache lines */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }
    rt_hw_icache_invalidate_all();

#ifdef ARCH_MM_MMU
    /* MMU build: user stack is the fixed user VA range; pass the kernel
     * stack top as the second stack argument */
    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
#else
    /* no-MMU build: stack is carved out of the process's data region */
    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
  980. struct rt_lwp *lwp_self(void)
  981. {
  982. rt_thread_t tid;
  983. tid = rt_thread_self();
  984. if (tid)
  985. {
  986. return (struct rt_lwp *)tid->lwp;
  987. }
  988. return RT_NULL;
  989. }
  990. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  991. {
  992. int result;
  993. rt_base_t level;
  994. struct rt_lwp *lwp;
  995. char *thread_name;
  996. char *argv_last = argv[argc - 1];
  997. int bg = 0;
  998. struct process_aux *aux;
  999. int tid = 0;
  1000. int ret;
  1001. if (filename == RT_NULL)
  1002. {
  1003. return -RT_ERROR;
  1004. }
  1005. lwp = lwp_new();
  1006. if (lwp == RT_NULL)
  1007. {
  1008. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  1009. return -RT_ENOMEM;
  1010. }
  1011. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  1012. if ((tid = lwp_tid_get()) == 0)
  1013. {
  1014. lwp_ref_dec(lwp);
  1015. return -ENOMEM;
  1016. }
  1017. #ifdef ARCH_MM_MMU
  1018. if (lwp_user_space_init(lwp, 0) != 0)
  1019. {
  1020. lwp_tid_put(tid);
  1021. lwp_ref_dec(lwp);
  1022. return -ENOMEM;
  1023. }
  1024. #endif
  1025. if (argv_last[0] == '&' && argv_last[1] == '\0')
  1026. {
  1027. argc--;
  1028. bg = 1;
  1029. }
  1030. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  1031. {
  1032. lwp_tid_put(tid);
  1033. lwp_ref_dec(lwp);
  1034. return -ENOMEM;
  1035. }
  1036. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  1037. #ifdef ARCH_MM_MMU
  1038. if (result == 1)
  1039. {
  1040. /* dynmaic */
  1041. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  1042. result = load_ldso(lwp, filename, argv, envp);
  1043. }
  1044. #endif /* ARCH_MM_MMU */
  1045. if (result == RT_EOK)
  1046. {
  1047. rt_thread_t thread = RT_NULL;
  1048. rt_uint32_t priority = 25, tick = 200;
  1049. lwp_copy_stdio_fdt(lwp);
  1050. /* obtain the base name */
  1051. thread_name = strrchr(filename, '/');
  1052. thread_name = thread_name ? thread_name + 1 : filename;
  1053. #ifndef ARCH_MM_MMU
  1054. struct lwp_app_head *app_head = lwp->text_entry;
  1055. if (app_head->priority)
  1056. {
  1057. priority = app_head->priority;
  1058. }
  1059. if (app_head->tick)
  1060. {
  1061. tick = app_head->tick;
  1062. }
  1063. #endif /* not defined ARCH_MM_MMU */
  1064. thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
  1065. LWP_TASK_STACK_SIZE, priority, tick);
  1066. if (thread != RT_NULL)
  1067. {
  1068. struct rt_lwp *self_lwp;
  1069. thread->tid = tid;
  1070. lwp_tid_set_thread(tid, thread);
  1071. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
  1072. (rt_size_t)thread->stack_addr + thread->stack_size);
  1073. level = rt_hw_interrupt_disable();
  1074. self_lwp = lwp_self();
  1075. if (self_lwp)
  1076. {
  1077. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1078. lwp->__pgrp = tid;
  1079. lwp->session = self_lwp->session;
  1080. /* lwp add to children link */
  1081. lwp->sibling = self_lwp->first_child;
  1082. self_lwp->first_child = lwp;
  1083. lwp->parent = self_lwp;
  1084. }
  1085. else
  1086. {
  1087. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1088. lwp->__pgrp = tid;
  1089. }
  1090. if (!bg)
  1091. {
  1092. if (lwp->session == -1)
  1093. {
  1094. struct tty_struct *tty = RT_NULL;
  1095. struct rt_lwp *old_lwp;
  1096. tty = (struct tty_struct *)console_tty_get();
  1097. old_lwp = tty->foreground;
  1098. rt_mutex_take(&tty->lock, RT_WAITING_FOREVER);
  1099. ret = tty_push(&tty->head, old_lwp);
  1100. rt_mutex_release(&tty->lock);
  1101. if (ret < 0)
  1102. {
  1103. lwp_tid_put(tid);
  1104. lwp_ref_dec(lwp);
  1105. LOG_E("malloc fail!\n");
  1106. return -ENOMEM;
  1107. }
  1108. lwp->tty = tty;
  1109. lwp->tty->pgrp = lwp->__pgrp;
  1110. lwp->tty->session = lwp->session;
  1111. lwp->tty->foreground = lwp;
  1112. tcgetattr(1, &stdin_termios);
  1113. old_stdin_termios = stdin_termios;
  1114. stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
  1115. tcsetattr(1, 0, &stdin_termios);
  1116. }
  1117. else
  1118. {
  1119. if (self_lwp != RT_NULL)
  1120. {
  1121. rt_mutex_take(&self_lwp->tty->lock, RT_WAITING_FOREVER);
  1122. ret = tty_push(&self_lwp->tty->head, self_lwp);
  1123. rt_mutex_release(&self_lwp->tty->lock);
  1124. if (ret < 0)
  1125. {
  1126. lwp_tid_put(tid);
  1127. lwp_ref_dec(lwp);
  1128. LOG_E("malloc fail!\n");
  1129. return -ENOMEM;
  1130. }
  1131. lwp->tty = self_lwp->tty;
  1132. lwp->tty->pgrp = lwp->__pgrp;
  1133. lwp->tty->session = lwp->session;
  1134. lwp->tty->foreground = lwp;
  1135. }
  1136. else
  1137. {
  1138. lwp->tty = RT_NULL;
  1139. }
  1140. }
  1141. }
  1142. else
  1143. {
  1144. lwp->background = RT_TRUE;
  1145. }
  1146. thread->lwp = lwp;
  1147. #ifndef ARCH_MM_MMU
  1148. struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
  1149. thread->user_stack = app_head->stack_offset ?
  1150. (void *)(app_head->stack_offset -
  1151. app_head->data_offset +
  1152. (uint32_t)lwp->data_entry) : RT_NULL;
  1153. thread->user_stack_size = app_head->stack_size;
  1154. /* init data area */
  1155. rt_memset(lwp->data_entry, 0, lwp->data_size);
  1156. /* init user stack */
  1157. rt_memset(thread->user_stack, '#', thread->user_stack_size);
  1158. #endif /* not defined ARCH_MM_MMU */
  1159. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  1160. if (debug && rt_dbg_ops)
  1161. {
  1162. lwp->debug = debug;
  1163. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
  1164. }
  1165. rt_hw_interrupt_enable(level);
  1166. rt_thread_startup(thread);
  1167. return lwp_to_pid(lwp);
  1168. }
  1169. }
  1170. lwp_tid_put(tid);
  1171. lwp_ref_dec(lwp);
  1172. return -RT_ERROR;
  1173. }
#ifdef RT_USING_MUSL
/* musl libc owns the environment pointer — just reference it */
extern char **__environ;
#else
/* no libc environment available: define an empty one here */
char **__environ = 0;
#endif
  1179. pid_t exec(char *filename, int debug, int argc, char **argv)
  1180. {
  1181. setenv("OS", "RT-Thread", 1);
  1182. return lwp_execve(filename, debug, argc, argv, __environ);
  1183. }
  1184. #ifdef ARCH_MM_MMU
  1185. void lwp_user_setting_save(rt_thread_t thread)
  1186. {
  1187. if (thread)
  1188. {
  1189. thread->thread_idr = arch_get_tidr();
  1190. }
  1191. }
/* Restore per-thread user-mode state when switching in `thread`: reloads
 * its TLS pointer (tidr) and, when a debugger is attached, updates the
 * hardware process id and the single-step activation state. */
void lwp_user_setting_restore(rt_thread_t thread)
{
    if (!thread)
    {
        return;
    }
#if !defined(ARCH_RISCV64)
    /* tidr will be set in RESTORE_ALL in risc-v */
    arch_set_tidr(thread->thread_idr);
#endif

    if (rt_dbg_ops)
    {
        struct rt_lwp *l = (struct rt_lwp *)thread->lwp;

        /* kernel threads have no lwp and report process id 0 */
        if (l != 0)
        {
            rt_hw_set_process_id((size_t)l->pid);
        }
        else
        {
            rt_hw_set_process_id(0);
        }
        if (l && l->debug)
        {
            uint32_t step_type = 0;

            step_type = dbg_step_type();
            /* NOTE(review): type 2 looks like "step regardless of thread" and
             * type 1 "step only when this thread is marked step_exec" —
             * confirm against dbg_step_type()'s definition */
            if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
            {
                dbg_activate_step();
            }
            else
            {
                dbg_deactivate_step();
            }
        }
    }
}
  1228. #endif /* ARCH_MM_MMU */