lwp.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
  9. * 2018-11-02 heyuanjie fix complie error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. */
  13. #include <rthw.h>
  14. #include <rtthread.h>
  15. #include <dfs_file.h>
  16. #include <unistd.h>
  17. #include <stdio.h> /* rename() */
  18. #include <fcntl.h>
  19. #include <sys/stat.h>
  20. #include <sys/statfs.h> /* statfs() */
  21. #include <lwp_elf.h>
  22. #ifndef RT_USING_DFS
  23. #error "lwp need file system(RT_USING_DFS)"
  24. #endif
  25. #include "lwp.h"
  26. #include "lwp_arch.h"
  27. #include "lwp_arch_comm.h"
  28. #include "console.h"
  29. #define DBG_TAG "LWP"
  30. #define DBG_LVL DBG_WARNING
  31. #include <rtdbg.h>
  32. #ifdef ARCH_MM_MMU
  33. #include <lwp_mm_area.h>
  34. #include <lwp_user_mm.h>
  35. #endif /* end of ARCH_MM_MMU */
  36. #ifndef O_DIRECTORY
  37. #define O_DIRECTORY 0x200000
  38. #endif
  39. #ifndef O_BINARY
  40. #define O_BINARY 0x10000
  41. #endif
  42. static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
  43. #ifdef DFS_USING_WORKDIR
  44. extern char working_directory[];
  45. #endif
  46. static struct termios stdin_termios, old_stdin_termios;
  47. int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
  48. struct termios *get_old_termios(void)
  49. {
  50. return &old_stdin_termios;
  51. }
  52. void lwp_setcwd(char *buf)
  53. {
  54. struct rt_lwp *lwp = RT_NULL;
  55. if(strlen(buf) >= DFS_PATH_MAX)
  56. {
  57. rt_kprintf("buf too long!\n");
  58. return ;
  59. }
  60. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  61. if (lwp)
  62. {
  63. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
  64. }
  65. else
  66. {
  67. rt_strncpy(working_directory, buf, DFS_PATH_MAX);
  68. }
  69. return ;
  70. }
  71. char *lwp_getcwd(void)
  72. {
  73. char *dir_buf = RT_NULL;
  74. struct rt_lwp *lwp = RT_NULL;
  75. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  76. if (lwp)
  77. {
  78. if(lwp->working_directory[0] != '/')
  79. {
  80. dir_buf = &working_directory[0];
  81. }
  82. else
  83. {
  84. dir_buf = &lwp->working_directory[0];
  85. }
  86. }
  87. else
  88. dir_buf = &working_directory[0];
  89. return dir_buf;
  90. }
  91. /**
  92. * RT-Thread light-weight process
  93. */
  94. void lwp_set_kernel_sp(uint32_t *sp)
  95. {
  96. rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
  97. }
/**
 * Return the kernel-mode stack pointer for the current context.
 *
 * MMU builds: the thread's saved sp is the kernel sp.
 * Non-MMU builds: during a pended context switch the current thread's
 * TCB is located from the scheduler's saved from-thread sp address and
 * its kernel_sp is used; otherwise the running thread's kernel_sp.
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef ARCH_MM_MMU
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;
    if (rt_thread_switch_interrupt_flag)
    {
        /* rt_interrupt_from_thread holds the address of the outgoing thread's
         * sp field; rt_container_of recovers the enclosing rt_thread.
         * NOTE(review): assumes the port stores &thread->sp there — confirm
         * against the arch context-switch code. */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
  117. #ifdef ARCH_MM_MMU
/**
 * Copy argc/argv/envp plus the ELF auxiliary vector into a single user
 * page mapped at the top of the process address space (MMU build).
 *
 * Layout written into the page: argc, argv[0..argc-1], NULL, envp[...],
 * NULL, aux items, NULL, followed by the packed string data.
 *
 * Two views of the same page are maintained: `args`/`str` hold the USER
 * virtual addresses that are stored into the vectors, while
 * `args_k`/`str_k` hold the kernel-side alias (phys - PV_OFFSET) through
 * which the data is actually written.
 *
 * @param lwp  target process; lwp->args receives the user address.
 * @param argc number of argument strings.
 * @param argv argument strings (kernel-accessible).
 * @param envp NULL-terminated environment strings, may be NULL.
 * @return kernel-side pointer to the aux vector, or RT_NULL if the
 *         total size exceeds one page or the user mapping fails.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;            /* user VA cursor for string data */
    char *str_k;          /* kernel alias cursor for the same bytes */
    char **new_argve;
    int i;
    int len;
    size_t *args_k;
    struct process_aux *aux;

    /* account for every argv string (incl. NUL) ... */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    /* ... and the argv pointer slots */
    size += (sizeof(size_t) * argc);
    i = 0;
    if (envp)
    {
        /* envp strings plus one pointer slot each; i ends as env count */
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }
    /* for aux */
    size += sizeof(struct process_aux);
    if (size > ARCH_PAGE_SIZE)
    {
        /* everything must fit in the single reserved page */
        return RT_NULL;
    }
    /* args = (int *)lwp_map_user(lwp, 0, size); */
    args = (int *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE), size, 0);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    /* kernel-visible alias of the freshly mapped user page */
    args_k = (size_t *)rt_hw_mmu_v2p(&lwp->mmu_info, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);
    /* argc, argv[], 0, envp[], 0 , aux[] */
    /* string area begins right after all pointer slots (user + kernel view) */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    new_argve = (char **)&args_k[1];
    args_k[0] = argc;
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;                 /* store USER address */
        rt_memcpy(str_k, argv[i], len);     /* write via kernel alias */
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;   /* argv terminator */
    i++;
    new_argve[i] = 0;   /* envp terminator when envp list is empty */
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;   /* envp terminator */
    }
    i++;
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)(size_t)new_argve[0]; /* user VA of argv[0] */
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;   /* aux terminator */
    /* make the user page coherent before the process runs */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);
    lwp->args = args;
    return aux;
}
  199. #else
/**
 * Copy argc/argv/envp (and, in the MMU sub-variant, the aux vector)
 * into a heap block (non-MMU build of lwp_argscopy).
 *
 * Layout: argc, argv[0..argc-1], NULL, envp[...], NULL[, aux], then the
 * packed string data. Unlike the MMU build there is only one address
 * view, so pointers and bytes are written through the same buffer.
 *
 * @param lwp  target process; lwp->args (and lwp->args_length) filled in.
 * @param argc number of argument strings.
 * @param argv argument strings.
 * @param envp NULL-terminated environment list, may be NULL.
 * @return aux-vector pointer inside the allocated block, or RT_NULL on
 *         allocation failure.
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;

    /* account for argv strings and their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);
    i = 0;
    if (envp)
    {
        /* envp strings plus one pointer slot each; i ends as env count */
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }
#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    /* string area starts after argc + argv slots + NULL + envp slots + NULL */
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */
    new_argve = (char **)&args[1];
    args[0] = argc;
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;   /* argv terminator */
    i++;
    new_argve[i] = 0;   /* envp terminator when envp list is empty */
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;   /* envp terminator */
    }
#ifdef ARCH_MM_MMU
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;
    lwp->args = args;
    return aux;
#else
    lwp->args = args;
    lwp->args_length = size;    /* remembered so the block can be freed later */
    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
  286. #endif
  287. #ifdef ARCH_MM_MMU
/*
 * check_off(voff, vlen): abort ELF parsing when a file offset lies beyond
 * the file length. Relies on a local `result` variable and an `_exit`
 * cleanup label in the enclosing function.
 */
#define check_off(voff, vlen) \
do \
{ \
if (voff > vlen) \
{ \
result = -RT_ERROR; \
goto _exit; \
} \
} while (0)
/*
 * check_read(vrlen, vrlen_want): abort ELF parsing when a read returned
 * fewer bytes than requested (truncated/corrupt image). Same `result` /
 * `_exit` contract as check_off().
 */
#define check_read(vrlen, vrlen_want) \
do \
{ \
if (vrlen < vrlen_want) \
{ \
result = -RT_ERROR; \
goto _exit; \
} \
} while (0)
  306. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  307. {
  308. size_t read_block = 0;
  309. while (nmemb)
  310. {
  311. size_t count;
  312. count = read(fd, ptr, size * nmemb) / size;
  313. if (count < nmemb)
  314. {
  315. LOG_E("ERROR: file size error!");
  316. break;
  317. }
  318. ptr = (void *)((uint8_t *)ptr + (count * size));
  319. nmemb -= count;
  320. read_block += count;
  321. }
  322. return read_block;
  323. }
/*
 * In-memory ELF symbol-table entry used while applying relocations.
 * NOTE(review): the field order matches the Elf32_Sym layout; the native
 * Elf64_Sym ordering differs (st_info/st_other/st_shndx precede
 * st_value/st_size) — confirm against lwp_elf.h's Elf_* typedefs for
 * 64-bit targets.
 */
typedef struct
{
    Elf_Word st_name;       /* symbol name: offset into string table */
    Elf_Addr st_value;      /* symbol value/address */
    Elf_Word st_size;       /* size of the object, in bytes */
    unsigned char st_info;  /* type and binding attributes */
    unsigned char st_other; /* visibility */
    Elf_Half st_shndx;      /* defining section index */
} Elf_sym;
  333. #ifdef ARCH_MM_MMU
/* Contiguous virtual-address span accumulated from ELF section headers
 * (index 0 = text, 1 = data in load_elf's user_area[]). start == NULL
 * means the range is still empty. */
struct map_range
{
    void *start;    /* lowest virtual address of the span */
    size_t size;    /* span length in bytes */
};
  339. static void expand_map_range(struct map_range *m, void *start, size_t size)
  340. {
  341. if (!m->start)
  342. {
  343. m->start = start;
  344. m->size = size;
  345. }
  346. else
  347. {
  348. void *end = (void *)((char*)start + size);
  349. void *mend = (void *)((char*)m->start + m->size);
  350. if (m->start > start)
  351. {
  352. m->start = start;
  353. }
  354. if (mend < end)
  355. {
  356. mend = end;
  357. }
  358. m->size = (char *)mend - (char *)m->start;
  359. }
  360. }
  361. static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
  362. {
  363. void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
  364. void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  365. void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
  366. void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  367. if (m1->size)
  368. {
  369. if (m1_start < (void *)USER_LOAD_VADDR)
  370. {
  371. return -1;
  372. }
  373. if (m1_start > (void *)USER_STACK_VSTART)
  374. {
  375. return -1;
  376. }
  377. if (m1_end < (void *)USER_LOAD_VADDR)
  378. {
  379. return -1;
  380. }
  381. if (m1_end > (void *)USER_STACK_VSTART)
  382. {
  383. return -1;
  384. }
  385. }
  386. if (m2->size)
  387. {
  388. if (m2_start < (void *)USER_LOAD_VADDR)
  389. {
  390. return -1;
  391. }
  392. if (m2_start > (void *)USER_STACK_VSTART)
  393. {
  394. return -1;
  395. }
  396. if (m2_end < (void *)USER_LOAD_VADDR)
  397. {
  398. return -1;
  399. }
  400. if (m2_end > (void *)USER_STACK_VSTART)
  401. {
  402. return -1;
  403. }
  404. }
  405. if ((m1->size != 0) && (m2->size != 0))
  406. {
  407. if (m1_start < m2_start)
  408. {
  409. if (m1_end > m2_start)
  410. {
  411. return -1;
  412. }
  413. }
  414. else /* m2_start <= m1_start */
  415. {
  416. if (m2_end > m1_start)
  417. {
  418. return -1;
  419. }
  420. }
  421. }
  422. return 0;
  423. }
  424. #endif
/**
 * Load an ELF image from file descriptor fd into process lwp.
 *
 * Stages: (1) validate the ELF header (magic, 32/64-bit class, version,
 * ET_EXEC/ET_DYN type); (2) on MMU builds, bail out with 1 if the image
 * has a PT_DYNAMIC segment so the caller can hand off to ld.so;
 * (3) populate the aux vector (AT_PAGESZ/AT_RANDOM/AT_PHDR/AT_PHNUM/
 * AT_PHENT); (4) determine the load base — fixed load_addr, mapped user
 * ranges derived from section headers (MMU), or a malloc'd region
 * (non-MMU); (5) copy PT_LOAD segments and zero-fill BSS; (6) for
 * ET_DYN, apply relocations via arch_elf_reloc().
 *
 * @param fd        open file descriptor positioned anywhere (seeks itself).
 * @param len       total file length in bytes.
 * @param lwp       target process (text_entry/text_size/load_off updated).
 * @param load_addr fixed load base, or NULL to let the loader choose.
 * @param aux       aux vector to fill (items 1..5).
 * @return RT_EOK on success, 1 if the image needs the dynamic linker
 *         (MMU builds), negative error code otherwise.
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef ARCH_MM_MMU
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;
    rt_mmu_info *m_info = &lwp->mmu_info;
#endif
    /* --- stage 1: header validation --- */
    if (len < sizeof eheader)
    {
        LOG_E("len < sizeof eheader!");
        return -RT_ERROR;
    }
    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        LOG_E("elf_magic not same, magic:0x%x!", magic);
        return -RT_ERROR;
    }
    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);
#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#endif
    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        LOG_E("elf Version not 1,ver:%d!", eheader.e_ident[6]);
        return -RT_ERROR;
    }
    if ((eheader.e_type != ET_DYN)
#ifdef ARCH_MM_MMU
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
        return -RT_ERROR;
    }
#ifdef ARCH_MM_MMU
    /* --- stage 2: dynamic executables are delegated to ld.so --- */
    {
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);
            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif
    if (eheader.e_entry != 0)
    {
        /* a non-zero entry must match one of the two fixed load bases */
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalidate */
            LOG_E("elf entry is invalidate, entry:0x%x!", eheader.e_entry);
            return -RT_ERROR;
        }
    }
    /* --- stage 3: populate the aux vector --- */
    { /* load aux */
        uint8_t *process_header;
        size_t process_header_size;
        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef ARCH_MM_MMU
        /* program headers live in a dedicated page below the args page;
         * the tail 16 bytes are reserved for the AT_RANDOM seed */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            LOG_E("process_header_size too big, size:0x%x!", process_header_size);
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            LOG_E("lwp map user failed!");
            return -RT_ERROR;
        }
        pa = rt_hw_mmu_v2p(m_info, va);
        process_header = (uint8_t *)pa - PV_OFFSET; /* kernel alias of the page */
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif
        aux->item[1].key = AT_PAGESZ;
#ifdef ARCH_MM_MMU
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif
        aux->item[2].key = AT_RANDOM;
        {
            /* NOTE(review): rt_tick_get() is a weak entropy source for
             * AT_RANDOM — acceptable here only because nothing
             * security-critical consumes it; confirm. */
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef ARCH_MM_MMU
            uint8_t *krandom;
            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)rt_hw_mmu_v2p(m_info, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random; /* user-visible address */
        }
        aux->item[3].key = AT_PHDR;
#ifdef ARCH_MM_MMU
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }
    /* --- stage 4: choose the load base --- */
    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef ARCH_MM_MMU
    else
    {
        /* map user: derive text/data extents from SHF_ALLOC sections */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                /* read-only PROGBITS goes to text, writable to data */
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }
        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }
        if (user_area[0].start == NULL)
        {
            /* DYN: image is zero-based, rebase both ranges to the load window */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }
        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }
        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                /* last arg: text (i==0) is mapped differently from data */
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (int)(i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        /* non-MMU: compute the union of allocatable sections and malloc it */
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }
        total_size = end - start;
#ifdef RT_USING_CACHE
        /* cache-line aligned so dcache flush/invalidate covers whole lines */
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);
    /* --- stage 5: copy PT_LOAD segments --- */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);
        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
                return -RT_ERROR;
            }
            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef ARCH_MM_MMU
            {
                /* copy page by page through the kernel alias of each frame */
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;
                /* NOTE(review): uses load_addr (NULL in the DYN path) while
                 * the non-MMU branch uses load_off — works for ET_EXEC where
                 * p_vaddr is absolute; confirm for static-PIE inputs. */
                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                while (size)
                {
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);
            if (pheader.p_filesz < pheader.p_memsz)
            {
                /* zero-fill the BSS tail (memsz beyond filesz) */
#ifdef ARCH_MM_MMU
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;   /* shadows the outer `off` intentionally: page offset here */
                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;    /* only the first page starts mid-page */
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }
    /* --- stage 6: relocate --- */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);
        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }
        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        /* locate .got / .rel.dyn / .dynsym by name */
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }
        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef ARCH_MM_MMU
        arch_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
        /* text was written through the data path: flush D, invalidate I */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);
_exit:
    /* common cleanup for all check_off/check_read bailouts */
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  865. #endif /* ARCH_MM_MMU */
  866. rt_weak int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  867. {
  868. uint8_t *ptr;
  869. int ret = -1;
  870. int len;
  871. int fd = -1;
  872. /* check file name */
  873. RT_ASSERT(filename != RT_NULL);
  874. /* check lwp control block */
  875. RT_ASSERT(lwp != RT_NULL);
  876. /* copy file name to process name */
  877. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  878. if (load_addr != RT_NULL)
  879. {
  880. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  881. ptr = load_addr;
  882. }
  883. else
  884. {
  885. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  886. ptr = RT_NULL;
  887. }
  888. fd = open(filename, O_BINARY | O_RDONLY, 0);
  889. if (fd < 0)
  890. {
  891. LOG_E("ERROR: Can't open elf file %s!", filename);
  892. goto out;
  893. }
  894. len = lseek(fd, 0, SEEK_END);
  895. if (len < 0)
  896. {
  897. LOG_E("ERROR: File %s size error!", filename);
  898. goto out;
  899. }
  900. lseek(fd, 0, SEEK_SET);
  901. ret = load_elf(fd, len, lwp, ptr, aux);
  902. if ((ret != RT_EOK) && (ret != 1))
  903. {
  904. LOG_E("lwp load ret = %d", ret);
  905. }
  906. out:
  907. if (fd > 0)
  908. {
  909. close(fd);
  910. }
  911. return ret;
  912. }
/**
 * Thread-exit cleanup hook installed on lwp threads (see
 * _lwp_thread_entry): releases the thread id, unlinks the thread from
 * its process, and drops one process reference.
 *
 * The tid release and sibling-list removal happen inside an
 * interrupt-disabled window so the process thread list stays consistent.
 * When the reference count reaches zero, the process is also popped off
 * its controlling tty's list.
 *
 * NOTE(review): tid->lwp is dereferenced without a NULL check — assumes
 * this hook is only ever installed on threads that belong to an lwp;
 * confirm against all rt_thread cleanup call sites.
 */
void lwp_cleanup(struct rt_thread *tid)
{
    rt_base_t level;
    struct rt_lwp *lwp;
    struct tty_node *tty_head = RT_NULL;
    if (tid == NULL)
    {
        return;
    }
    LOG_I("cleanup thread: %s, stack_addr: %08X", tid->name, tid->stack_addr);
    level = rt_hw_interrupt_disable();
    lwp = (struct rt_lwp *)tid->lwp;
    lwp_tid_put(tid->tid);
    rt_list_remove(&tid->sibling);
    rt_hw_interrupt_enable(level);
    if (lwp->tty != RT_NULL)
    {
        /* remember the tty list head before the lwp may be destroyed */
        tty_head = lwp->tty->head;
    }
    if (!lwp_ref_dec(lwp))
    {
        /* last reference gone: detach the process from its tty */
        if (tty_head)
        {
            tty_pop(&tty_head, lwp);
        }
    }
    return;
}
  941. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  942. {
  943. struct dfs_fd *d;
  944. struct dfs_fdtable *lwp_fdt;
  945. lwp_fdt = &lwp->fdt;
  946. /* init 4 fds */
  947. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  948. if (lwp_fdt->fds)
  949. {
  950. lwp_fdt->maxfd = 4;
  951. d = fd_get(0);
  952. fd_associate(lwp_fdt, 0, d);
  953. d = fd_get(1);
  954. fd_associate(lwp_fdt, 1, d);
  955. d = fd_get(2);
  956. fd_associate(lwp_fdt, 2, d);
  957. }
  958. return;
  959. }
/* Kernel-side entry of every process's first thread: installs the cleanup
 * hook, optionally plants a debugger breakpoint at the program entry point,
 * then switches the CPU to user mode at lwp->text_entry (does not return). */
static void _lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

    if (lwp->debug)
    {
        /* save the first instruction and overwrite it with the debug
         * break instruction so the debugger traps on entry */
        lwp->bak_first_ins = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        /* flush D-cache and invalidate I-cache so the patched
         * instruction is what the CPU actually fetches */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }

#ifdef ARCH_MM_MMU
    /* MMU build: user stack top is the fixed virtual address USER_STACK_VEND */
    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
#else
    /* no-MMU build: stack grows from the end of the process data area */
    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
  981. struct rt_lwp *lwp_self(void)
  982. {
  983. rt_thread_t tid;
  984. tid = rt_thread_self();
  985. if (tid)
  986. {
  987. return (struct rt_lwp *)tid->lwp;
  988. }
  989. return RT_NULL;
  990. }
  991. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  992. {
  993. int result;
  994. rt_base_t level;
  995. struct rt_lwp *lwp;
  996. char *thread_name;
  997. char *argv_last = argv[argc - 1];
  998. int bg = 0;
  999. struct process_aux *aux;
  1000. int tid = 0;
  1001. int ret;
  1002. if (filename == RT_NULL)
  1003. {
  1004. return -RT_ERROR;
  1005. }
  1006. lwp = lwp_new();
  1007. if (lwp == RT_NULL)
  1008. {
  1009. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  1010. return -RT_ENOMEM;
  1011. }
  1012. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  1013. if ((tid = lwp_tid_get()) == 0)
  1014. {
  1015. lwp_ref_dec(lwp);
  1016. return -ENOMEM;
  1017. }
  1018. #ifdef ARCH_MM_MMU
  1019. if (lwp_user_space_init(lwp) != 0)
  1020. {
  1021. lwp_tid_put(tid);
  1022. lwp_ref_dec(lwp);
  1023. return -ENOMEM;
  1024. }
  1025. #endif
  1026. if (argv_last[0] == '&' && argv_last[1] == '\0')
  1027. {
  1028. argc--;
  1029. bg = 1;
  1030. }
  1031. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  1032. {
  1033. lwp_tid_put(tid);
  1034. lwp_ref_dec(lwp);
  1035. return -ENOMEM;
  1036. }
  1037. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  1038. #ifdef ARCH_MM_MMU
  1039. if (result == 1)
  1040. {
  1041. /* dynmaic */
  1042. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  1043. result = load_ldso(lwp, filename, argv, envp);
  1044. }
  1045. #endif /* ARCH_MM_MMU */
  1046. if (result == RT_EOK)
  1047. {
  1048. rt_thread_t thread = RT_NULL;
  1049. rt_uint32_t priority = 25, tick = 200;
  1050. lwp_copy_stdio_fdt(lwp);
  1051. /* obtain the base name */
  1052. thread_name = strrchr(filename, '/');
  1053. thread_name = thread_name ? thread_name + 1 : filename;
  1054. #ifndef ARCH_MM_MMU
  1055. struct lwp_app_head *app_head = lwp->text_entry;
  1056. if (app_head->priority)
  1057. {
  1058. priority = app_head->priority;
  1059. }
  1060. if (app_head->tick)
  1061. {
  1062. tick = app_head->tick;
  1063. }
  1064. #endif /* not defined ARCH_MM_MMU */
  1065. thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
  1066. LWP_TASK_STACK_SIZE, priority, tick);
  1067. if (thread != RT_NULL)
  1068. {
  1069. struct rt_lwp *self_lwp;
  1070. thread->tid = tid;
  1071. lwp_tid_set_thread(tid, thread);
  1072. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
  1073. (rt_size_t)thread->stack_addr + thread->stack_size);
  1074. level = rt_hw_interrupt_disable();
  1075. self_lwp = lwp_self();
  1076. if (self_lwp)
  1077. {
  1078. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1079. lwp->__pgrp = tid;
  1080. lwp->session = self_lwp->session;
  1081. /* lwp add to children link */
  1082. lwp->sibling = self_lwp->first_child;
  1083. self_lwp->first_child = lwp;
  1084. lwp->parent = self_lwp;
  1085. }
  1086. else
  1087. {
  1088. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1089. lwp->__pgrp = tid;
  1090. }
  1091. if (!bg)
  1092. {
  1093. if (lwp->session == -1)
  1094. {
  1095. struct tty_struct *tty = RT_NULL;
  1096. struct rt_lwp *old_lwp;
  1097. tty = (struct tty_struct *)console_tty_get();
  1098. old_lwp = tty->foreground;
  1099. rt_mutex_take(&tty->lock, RT_WAITING_FOREVER);
  1100. ret = tty_push(&tty->head, old_lwp);
  1101. rt_mutex_release(&tty->lock);
  1102. if (ret < 0)
  1103. {
  1104. lwp_tid_put(tid);
  1105. lwp_ref_dec(lwp);
  1106. LOG_E("malloc fail!\n");
  1107. return -ENOMEM;
  1108. }
  1109. lwp->tty = tty;
  1110. lwp->tty->pgrp = lwp->__pgrp;
  1111. lwp->tty->session = lwp->session;
  1112. lwp->tty->foreground = lwp;
  1113. tcgetattr(1, &stdin_termios);
  1114. old_stdin_termios = stdin_termios;
  1115. stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
  1116. tcsetattr(1, 0, &stdin_termios);
  1117. }
  1118. else
  1119. {
  1120. if (self_lwp != RT_NULL)
  1121. {
  1122. rt_mutex_take(&self_lwp->tty->lock, RT_WAITING_FOREVER);
  1123. ret = tty_push(&self_lwp->tty->head, self_lwp);
  1124. rt_mutex_release(&self_lwp->tty->lock);
  1125. if (ret < 0)
  1126. {
  1127. lwp_tid_put(tid);
  1128. lwp_ref_dec(lwp);
  1129. LOG_E("malloc fail!\n");
  1130. return -ENOMEM;
  1131. }
  1132. lwp->tty = self_lwp->tty;
  1133. lwp->tty->pgrp = lwp->__pgrp;
  1134. lwp->tty->session = lwp->session;
  1135. lwp->tty->foreground = lwp;
  1136. }
  1137. else
  1138. {
  1139. lwp->tty = RT_NULL;
  1140. }
  1141. }
  1142. }
  1143. thread->lwp = lwp;
  1144. #ifndef ARCH_MM_MMU
  1145. struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
  1146. thread->user_stack = app_head->stack_offset ?
  1147. (void *)(app_head->stack_offset -
  1148. app_head->data_offset +
  1149. (uint32_t)lwp->data_entry) : RT_NULL;
  1150. thread->user_stack_size = app_head->stack_size;
  1151. /* init data area */
  1152. rt_memset(lwp->data_entry, 0, lwp->data_size);
  1153. /* init user stack */
  1154. rt_memset(thread->user_stack, '#', thread->user_stack_size);
  1155. #endif /* not defined ARCH_MM_MMU */
  1156. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  1157. if (debug && rt_dbg_ops)
  1158. {
  1159. lwp->debug = debug;
  1160. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
  1161. }
  1162. rt_hw_interrupt_enable(level);
  1163. rt_thread_startup(thread);
  1164. return lwp_to_pid(lwp);
  1165. }
  1166. }
  1167. lwp_tid_put(tid);
  1168. lwp_ref_dec(lwp);
  1169. return -RT_ERROR;
  1170. }
#ifdef RT_USING_MUSL
/* musl libc provides the environment table */
extern char **__environ;
#else
/* no libc environment support: supply an empty table */
char **__environ = 0;
#endif
  1176. pid_t exec(char *filename, int debug, int argc, char **argv)
  1177. {
  1178. return lwp_execve(filename, debug, argc, argv, __environ);
  1179. }
  1180. #ifdef ARCH_MM_MMU
  1181. void lwp_user_setting_save(rt_thread_t thread)
  1182. {
  1183. if (thread)
  1184. {
  1185. thread->thread_idr = arch_get_tidr();
  1186. }
  1187. }
/* Restore a thread's user-mode settings on context switch-in: the
 * thread-id register, the debug hardware's notion of the current process,
 * and single-step state when the process is being debugged. */
void lwp_user_setting_restore(rt_thread_t thread)
{
    if (!thread)
    {
        return;
    }
#if !defined(ARCH_RISCV64)
    /* tidr will be set in RESTORE_ALL in risc-v */
    arch_set_tidr(thread->thread_idr);
#endif

    if (rt_dbg_ops)
    {
        struct rt_lwp *l = (struct rt_lwp *)thread->lwp;

        /* tell the debug hardware which process is now current
         * (0 = kernel thread, no process) */
        if (l != 0)
        {
            rt_hw_set_process_id((size_t)l->pid);
        }
        else
        {
            rt_hw_set_process_id(0);
        }
        if (l && l->debug)
        {
            uint32_t step_type = 0;

            step_type = dbg_step_type();
            /* NOTE(review): step_type semantics are opaque here —
             * presumably 2 = step all threads, 1 = step only the thread
             * flagged step_exec; confirm against dbg_step_type() */
            if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
            {
                dbg_activate_step();
            }
            else
            {
                dbg_deactivate_step();
            }
        }
    }
}
#endif /* ARCH_MM_MMU */