lwp.c 37 KB

  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
  9. * 2018-11-02 heyuanjie fix compile error in IAR
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. * 2023-02-20 wangxiaoyao inv icache before new app startup
  13. * 2023-02-20 wangxiaoyao fix bug on foreground app switch
  14. */
  15. #define DBG_TAG "LWP"
  16. #define DBG_LVL DBG_WARNING
  17. #include <rtdbg.h>
  18. #include <rthw.h>
  19. #include <rtthread.h>
  20. #include <dfs_file.h>
  21. #include <unistd.h>
  22. #include <stdio.h> /* rename() */
  23. #include <fcntl.h>
  24. #include <sys/stat.h>
  25. #include <sys/statfs.h> /* statfs() */
  26. #include <lwp_elf.h>
  27. #ifndef RT_USING_DFS
  28. #error "lwp needs a file system (RT_USING_DFS)"
  29. #endif
  30. #include "lwp.h"
  31. #include "lwp_arch.h"
  32. #include "lwp_arch_comm.h"
  33. #include "lwp_signal.h"
  34. #include "lwp_dbg.h"
  35. #include "console.h"
  36. #ifdef ARCH_MM_MMU
  37. #include <lwp_user_mm.h>
  38. #endif /* end of ARCH_MM_MMU */
  39. #ifndef O_DIRECTORY
  40. #define O_DIRECTORY 0x200000
  41. #endif
  42. #ifndef O_BINARY
  43. #define O_BINARY 0x10000
  44. #endif
  45. static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
  46. #ifdef DFS_USING_WORKDIR
  47. extern char working_directory[];
  48. #endif
  49. static struct termios stdin_termios, old_stdin_termios;
  50. int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
  51. struct termios *get_old_termios(void)
  52. {
  53. return &old_stdin_termios;
  54. }
  55. void lwp_setcwd(char *buf)
  56. {
  57. struct rt_lwp *lwp = RT_NULL;
  58. if(strlen(buf) >= DFS_PATH_MAX)
  59. {
  60. rt_kprintf("buf too long!\n");
  61. return ;
  62. }
  63. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  64. if (lwp)
  65. {
  66. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
  67. }
  68. else
  69. {
  70. rt_strncpy(working_directory, buf, DFS_PATH_MAX);
  71. }
  72. return ;
  73. }
  74. char *lwp_getcwd(void)
  75. {
  76. char *dir_buf = RT_NULL;
  77. struct rt_lwp *lwp = RT_NULL;
  78. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  79. if (lwp)
  80. {
  81. if(lwp->working_directory[0] != '/')
  82. {
  83. dir_buf = &working_directory[0];
  84. }
  85. else
  86. {
  87. dir_buf = &lwp->working_directory[0];
  88. }
  89. }
  90. else
  91. dir_buf = &working_directory[0];
  92. return dir_buf;
  93. }
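/*
 * Minimal usage sketch for the two helpers above (the "/tmp" path is
 * illustrative only): set the calling process's working directory and
 * read it back.
 *
 *     lwp_setcwd("/tmp");
 *     rt_kprintf("cwd: %s\n", lwp_getcwd());
 */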
  94. /**
  95. * RT-Thread light-weight process
  96. */
  97. void lwp_set_kernel_sp(uint32_t *sp)
  98. {
  99. rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
  100. }
  101. uint32_t *lwp_get_kernel_sp(void)
  102. {
  103. #ifdef ARCH_MM_MMU
  104. return (uint32_t *)rt_thread_self()->sp;
  105. #else
  106. uint32_t* kernel_sp;
  107. extern rt_uint32_t rt_interrupt_from_thread;
  108. extern rt_uint32_t rt_thread_switch_interrupt_flag;
  109. if (rt_thread_switch_interrupt_flag)
  110. {
  111. kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
  112. }
  113. else
  114. {
  115. kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
  116. }
  117. return kernel_sp;
  118. #endif
  119. }
  120. #ifdef ARCH_MM_MMU
  121. struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
  122. {
  123. int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
  124. int *args;
  125. char *str;
  126. char *str_k;
  127. char **new_argve;
  128. int i;
  129. int len;
  130. size_t *args_k;
  131. struct process_aux *aux;
  132. for (i = 0; i < argc; i++)
  133. {
  134. size += (rt_strlen(argv[i]) + 1);
  135. }
  136. size += (sizeof(size_t) * argc);
  137. i = 0;
  138. if (envp)
  139. {
  140. while (envp[i] != 0)
  141. {
  142. size += (rt_strlen(envp[i]) + 1);
  143. size += sizeof(size_t);
  144. i++;
  145. }
  146. }
  147. /* for aux */
  148. size += sizeof(struct process_aux);
  149. if (size > ARCH_PAGE_SIZE)
  150. {
  151. return RT_NULL;
  152. }
  153. /* args = (int *)lwp_map_user(lwp, 0, size); */
  154. args = (int *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE), size, 0);
  155. if (args == RT_NULL)
  156. {
  157. return RT_NULL;
  158. }
  159. args_k = (size_t *)lwp_v2p(lwp, args);
  160. args_k = (size_t *)((size_t)args_k - PV_OFFSET);
  161. /* argc, argv[], 0, envp[], 0 , aux[] */
  162. str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
  163. str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
  164. new_argve = (char **)&args_k[1];
  165. args_k[0] = argc;
  166. for (i = 0; i < argc; i++)
  167. {
  168. len = rt_strlen(argv[i]) + 1;
  169. new_argve[i] = str;
  170. rt_memcpy(str_k, argv[i], len);
  171. str += len;
  172. str_k += len;
  173. }
  174. new_argve[i] = 0;
  175. i++;
  176. new_argve[i] = 0;
  177. if (envp)
  178. {
  179. int j;
  180. for (j = 0; envp[j] != 0; j++)
  181. {
  182. len = rt_strlen(envp[j]) + 1;
  183. new_argve[i] = str;
  184. rt_memcpy(str_k, envp[j], len);
  185. str += len;
  186. str_k += len;
  187. i++;
  188. }
  189. new_argve[i] = 0;
  190. }
  191. i++;
  192. /* aux */
  193. aux = (struct process_aux *)(new_argve + i);
  194. aux->item[0].key = AT_EXECFN;
  195. aux->item[0].value = (size_t)new_argve[0];
  196. i += AUX_ARRAY_ITEMS_NR * 2;
  197. new_argve[i] = 0;
  198. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);
  199. lwp->args = args;
  200. return aux;
  201. }
  202. #else
  203. static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
  204. {
  205. #ifdef ARCH_MM_MMU
  206. int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
  207. struct process_aux *aux;
  208. #else
  209. int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
  210. #endif /* ARCH_MM_MMU */
  211. int *args;
  212. char *str;
  213. char **new_argve;
  214. int i;
  215. int len;
  216. for (i = 0; i < argc; i++)
  217. {
  218. size += (rt_strlen(argv[i]) + 1);
  219. }
  220. size += (sizeof(int) * argc);
  221. i = 0;
  222. if (envp)
  223. {
  224. while (envp[i] != 0)
  225. {
  226. size += (rt_strlen(envp[i]) + 1);
  227. size += sizeof(int);
  228. i++;
  229. }
  230. }
  231. #ifdef ARCH_MM_MMU
  232. /* for aux */
  233. size += sizeof(struct process_aux);
  234. args = (int *)rt_malloc(size);
  235. if (args == RT_NULL)
  236. {
  237. return RT_NULL;
  238. }
  239. /* argc, argv[], 0, envp[], 0 */
  240. str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
  241. #else
  242. args = (int *)rt_malloc(size);
  243. if (args == RT_NULL)
  244. {
  245. return RT_NULL;
  246. }
  247. str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
  248. #endif /* ARCH_MM_MMU */
  249. new_argve = (char **)&args[1];
  250. args[0] = argc;
  251. for (i = 0; i < argc; i++)
  252. {
  253. len = rt_strlen(argv[i]) + 1;
  254. new_argve[i] = str;
  255. rt_memcpy(str, argv[i], len);
  256. str += len;
  257. }
  258. new_argve[i] = 0;
  259. i++;
  260. new_argve[i] = 0;
  261. if (envp)
  262. {
  263. int j;
  264. for (j = 0; envp[j] != 0; j++)
  265. {
  266. len = rt_strlen(envp[j]) + 1;
  267. new_argve[i] = str;
  268. rt_memcpy(str, envp[j], len);
  269. str += len;
  270. i++;
  271. }
  272. new_argve[i] = 0;
  273. }
  274. #ifdef ARCH_MM_MMU
  275. /* aux */
  276. aux = (struct process_aux *)(new_argve + i);
  277. aux->item[0].key = AT_EXECFN;
  278. aux->item[0].value = (uint32_t)(size_t)new_argve[0];
  279. i += AUX_ARRAY_ITEMS_NR * 2;
  280. new_argve[i] = 0;
  281. lwp->args = args;
  282. return aux;
  283. #else
  284. lwp->args = args;
  285. lwp->args_length = size;
  286. return (struct process_aux *)(new_argve + i);
  287. #endif /* ARCH_MM_MMU */
  288. }
  289. #endif
  290. #ifdef ARCH_MM_MMU
  291. #define check_off(voff, vlen) \
  292. do \
  293. { \
  294. if (voff > vlen) \
  295. { \
  296. result = -RT_ERROR; \
  297. goto _exit; \
  298. } \
  299. } while (0)
  300. #define check_read(vrlen, vrlen_want) \
  301. do \
  302. { \
  303. if (vrlen < vrlen_want) \
  304. { \
  305. result = -RT_ERROR; \
  306. goto _exit; \
  307. } \
  308. } while (0)
  309. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  310. {
  311. size_t read_block = 0;
  312. while (nmemb)
  313. {
  314. size_t count;
  315. count = read(fd, ptr, size * nmemb) / size;
  316. if (count < nmemb)
  317. {
  318. LOG_E("ERROR: file size error!");
  319. break;
  320. }
  321. ptr = (void *)((uint8_t *)ptr + (count * size));
  322. nmemb -= count;
  323. read_block += count;
  324. }
  325. return read_block;
  326. }
  327. typedef struct
  328. {
  329. Elf_Word st_name;
  330. Elf_Addr st_value;
  331. Elf_Word st_size;
  332. unsigned char st_info;
  333. unsigned char st_other;
  334. Elf_Half st_shndx;
  335. } Elf_sym;
  336. #ifdef ARCH_MM_MMU
  337. struct map_range
  338. {
  339. void *start;
  340. size_t size;
  341. };
  342. static void expand_map_range(struct map_range *m, void *start, size_t size)
  343. {
  344. if (!m->start)
  345. {
  346. m->start = start;
  347. m->size = size;
  348. }
  349. else
  350. {
  351. void *end = (void *)((char*)start + size);
  352. void *mend = (void *)((char*)m->start + m->size);
  353. if (m->start > start)
  354. {
  355. m->start = start;
  356. }
  357. if (mend < end)
  358. {
  359. mend = end;
  360. }
  361. m->size = (char *)mend - (char *)m->start;
  362. }
  363. }
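/*
 * Worked example for expand_map_range(): merging an existing range
 * {start = 0x1000, size = 0x100} with a later section at 0x2000 of size
 * 0x80 keeps one contiguous span covering both extremes. The addresses
 * are illustrative only.
 *
 *     struct map_range r = { (void *)0x1000, 0x100 };
 *     expand_map_range(&r, (void *)0x2000, 0x80);
 *     // r is now { (void *)0x1000, 0x1080 }
 */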
  364. static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
  365. {
  366. void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
  367. void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  368. void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
  369. void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  370. if (m1->size)
  371. {
  372. if (m1_start < (void *)USER_LOAD_VADDR)
  373. {
  374. return -1;
  375. }
  376. if (m1_start > (void *)USER_STACK_VSTART)
  377. {
  378. return -1;
  379. }
  380. if (m1_end < (void *)USER_LOAD_VADDR)
  381. {
  382. return -1;
  383. }
  384. if (m1_end > (void *)USER_STACK_VSTART)
  385. {
  386. return -1;
  387. }
  388. }
  389. if (m2->size)
  390. {
  391. if (m2_start < (void *)USER_LOAD_VADDR)
  392. {
  393. return -1;
  394. }
  395. if (m2_start > (void *)USER_STACK_VSTART)
  396. {
  397. return -1;
  398. }
  399. if (m2_end < (void *)USER_LOAD_VADDR)
  400. {
  401. return -1;
  402. }
  403. if (m2_end > (void *)USER_STACK_VSTART)
  404. {
  405. return -1;
  406. }
  407. }
  408. if ((m1->size != 0) && (m2->size != 0))
  409. {
  410. if (m1_start < m2_start)
  411. {
  412. if (m1_end > m2_start)
  413. {
  414. return -1;
  415. }
  416. }
  417. else /* m2_start <= m1_start */
  418. {
  419. if (m2_end > m1_start)
  420. {
  421. return -1;
  422. }
  423. }
  424. }
  425. return 0;
  426. }
  427. #endif
  428. static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
  429. {
  430. uint32_t i;
  431. uint32_t off = 0;
  432. size_t load_off = 0;
  433. char *p_section_str = 0;
  434. Elf_sym *dynsym = 0;
  435. Elf_Ehdr eheader;
  436. Elf_Phdr pheader;
  437. Elf_Shdr sheader;
  438. int result = RT_EOK;
  439. uint32_t magic;
  440. size_t read_len;
  441. void *got_start = 0;
  442. size_t got_size = 0;
  443. void *rel_dyn_start = 0;
  444. size_t rel_dyn_size = 0;
  445. size_t dynsym_off = 0;
  446. size_t dynsym_size = 0;
  447. #ifdef ARCH_MM_MMU
  448. struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
  449. void *pa, *va;
  450. void *va_self;
  451. #endif
  452. if (len < sizeof eheader)
  453. {
  454. LOG_E("len < sizeof eheader!");
  455. return -RT_ERROR;
  456. }
  457. lseek(fd, 0, SEEK_SET);
  458. read_len = load_fread(&magic, 1, sizeof magic, fd);
  459. check_read(read_len, sizeof magic);
  460. if (memcmp(elf_magic, &magic, 4) != 0)
  461. {
  462. LOG_E("elf_magic not same, magic:0x%x!", magic);
  463. return -RT_ERROR;
  464. }
  465. lseek(fd, off, SEEK_SET);
  466. read_len = load_fread(&eheader, 1, sizeof eheader, fd);
  467. check_read(read_len, sizeof eheader);
  468. #ifndef ARCH_CPU_64BIT
  469. if (eheader.e_ident[4] != 1)
  470. { /* not 32bit */
  471. LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
  472. return -RT_ERROR;
  473. }
  474. #else
  475. if (eheader.e_ident[4] != 2)
  476. { /* not 64bit */
  477. LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
  478. return -RT_ERROR;
  479. }
  480. #endif
  481. if (eheader.e_ident[6] != 1)
  482. { /* ver not 1 */
  483. LOG_E("elf Version not 1,ver:%d!", eheader.e_ident[6]);
  484. return -RT_ERROR;
  485. }
  486. if ((eheader.e_type != ET_DYN)
  487. #ifdef ARCH_MM_MMU
  488. && (eheader.e_type != ET_EXEC)
  489. #endif
  490. )
  491. {
  492. /* not pie or exec elf */
  493. LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
  494. return -RT_ERROR;
  495. }
  496. #ifdef ARCH_MM_MMU
  497. {
  498. off = eheader.e_phoff;
  499. for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
  500. {
  501. check_off(off, len);
  502. lseek(fd, off, SEEK_SET);
  503. read_len = load_fread(&pheader, 1, sizeof pheader, fd);
  504. check_read(read_len, sizeof pheader);
  505. if (pheader.p_type == PT_DYNAMIC)
  506. {
  507. /* load ld.so */
  508. return 1; /* 1 means dynamic */
  509. }
  510. }
  511. }
  512. #endif
  513. if (eheader.e_entry != 0)
  514. {
  515. if ((eheader.e_entry != USER_LOAD_VADDR)
  516. && (eheader.e_entry != LDSO_LOAD_VADDR))
  517. {
  518. /* the entry is invalid */
  519. LOG_E("elf entry is invalid, entry:0x%x!", eheader.e_entry);
  520. return -RT_ERROR;
  521. }
  522. }
  523. { /* load aux */
  524. uint8_t *process_header;
  525. size_t process_header_size;
  526. off = eheader.e_phoff;
  527. process_header_size = eheader.e_phnum * sizeof pheader;
  528. #ifdef ARCH_MM_MMU
  529. if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
  530. {
  531. LOG_E("process_header_size too big, size:0x%x!", process_header_size);
  532. return -RT_ERROR;
  533. }
  534. va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
  535. if (!va)
  536. {
  537. LOG_E("lwp map user failed!");
  538. return -RT_ERROR;
  539. }
  540. pa = lwp_v2p(lwp, va);
  541. process_header = (uint8_t *)pa - PV_OFFSET;
  542. #else
  543. process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
  544. if (!process_header)
  545. {
  546. LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
  547. return -RT_ERROR;
  548. }
  549. #endif
  550. check_off(off, len);
  551. lseek(fd, off, SEEK_SET);
  552. read_len = load_fread(process_header, 1, process_header_size, fd);
  553. check_read(read_len, process_header_size);
  554. #ifdef ARCH_MM_MMU
  555. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
  556. #endif
  557. aux->item[1].key = AT_PAGESZ;
  558. #ifdef ARCH_MM_MMU
  559. aux->item[1].value = ARCH_PAGE_SIZE;
  560. #else
  561. aux->item[1].value = RT_MM_PAGE_SIZE;
  562. #endif
  563. aux->item[2].key = AT_RANDOM;
  564. {
  565. uint32_t random_value = rt_tick_get();
  566. uint8_t *random;
  567. #ifdef ARCH_MM_MMU
  568. uint8_t *krandom;
  569. random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
  570. krandom = (uint8_t *)lwp_v2p(lwp, random);
  571. krandom = (uint8_t *)krandom - PV_OFFSET;
  572. rt_memcpy(krandom, &random_value, sizeof random_value);
  573. #else
  574. random = (uint8_t *)(process_header + process_header_size);
  575. rt_memcpy(random, &random_value, sizeof random_value);
  576. #endif
  577. aux->item[2].value = (size_t)random;
  578. }
  579. aux->item[3].key = AT_PHDR;
  580. #ifdef ARCH_MM_MMU
  581. aux->item[3].value = (size_t)va;
  582. #else
  583. aux->item[3].value = (size_t)process_header;
  584. #endif
  585. aux->item[4].key = AT_PHNUM;
  586. aux->item[4].value = eheader.e_phnum;
  587. aux->item[5].key = AT_PHENT;
  588. aux->item[5].value = sizeof pheader;
  589. #ifdef ARCH_MM_MMU
  590. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
  591. #endif
  592. }
  593. if (load_addr)
  594. {
  595. load_off = (size_t)load_addr;
  596. }
  597. #ifdef ARCH_MM_MMU
  598. else
  599. {
  600. /* map user */
  601. off = eheader.e_shoff;
  602. for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
  603. {
  604. check_off(off, len);
  605. lseek(fd, off, SEEK_SET);
  606. read_len = load_fread(&sheader, 1, sizeof sheader, fd);
  607. check_read(read_len, sizeof sheader);
  608. if ((sheader.sh_flags & SHF_ALLOC) == 0)
  609. {
  610. continue;
  611. }
  612. switch (sheader.sh_type)
  613. {
  614. case SHT_PROGBITS:
  615. if ((sheader.sh_flags & SHF_WRITE) == 0)
  616. {
  617. expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
  618. }
  619. else
  620. {
  621. expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
  622. }
  623. break;
  624. case SHT_NOBITS:
  625. expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
  626. break;
  627. default:
  628. expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
  629. break;
  630. }
  631. }
  632. if (user_area[0].size == 0)
  633. {
  634. /* no code */
  635. result = -RT_ERROR;
  636. goto _exit;
  637. }
  638. if (user_area[0].start == NULL)
  639. {
  640. /* DYN */
  641. load_off = USER_LOAD_VADDR;
  642. user_area[0].start = (void *)((char*)user_area[0].start + load_off);
  643. user_area[1].start = (void *)((char*)user_area[1].start + load_off);
  644. }
  645. if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
  646. {
  647. result = -RT_ERROR;
  648. goto _exit;
  649. }
  650. /* text and data */
  651. for (i = 0; i < 2; i++)
  652. {
  653. if (user_area[i].size != 0)
  654. {
  655. va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (i == 0));
  656. if (!va || (va != user_area[i].start))
  657. {
  658. result = -RT_ERROR;
  659. goto _exit;
  660. }
  661. }
  662. }
  663. lwp->text_size = user_area[0].size;
  664. }
  665. #else
  666. else
  667. {
  668. size_t start = -1UL;
  669. size_t end = 0UL;
  670. size_t total_size;
  671. off = eheader.e_shoff;
  672. for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
  673. {
  674. check_off(off, len);
  675. lseek(fd, off, SEEK_SET);
  676. read_len = load_fread(&sheader, 1, sizeof sheader, fd);
  677. check_read(read_len, sizeof sheader);
  678. if ((sheader.sh_flags & SHF_ALLOC) == 0)
  679. {
  680. continue;
  681. }
  682. switch (sheader.sh_type)
  683. {
  684. case SHT_PROGBITS:
  685. case SHT_NOBITS:
  686. if (start > sheader.sh_addr)
  687. {
  688. start = sheader.sh_addr;
  689. }
  690. if (sheader.sh_addr + sheader.sh_size > end)
  691. {
  692. end = sheader.sh_addr + sheader.sh_size;
  693. }
  694. break;
  695. default:
  696. break;
  697. }
  698. }
  699. total_size = end - start;
  700. #ifdef RT_USING_CACHE
  701. load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
  702. #else
  703. load_off = (size_t)rt_malloc(total_size);
  704. #endif
  705. if (load_off == 0)
  706. {
  707. LOG_E("alloc text memory faild!");
  708. result = -RT_ENOMEM;
  709. goto _exit;
  710. }
  711. else
  712. {
  713. LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
  714. }
  715. lwp->load_off = load_off; /* for free */
  716. lwp->text_size = total_size;
  717. }
  718. #endif
  719. lwp->text_entry = (void *)(eheader.e_entry + load_off);
  720. off = eheader.e_phoff;
  721. for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
  722. {
  723. check_off(off, len);
  724. lseek(fd, off, SEEK_SET);
  725. read_len = load_fread(&pheader, 1, sizeof pheader, fd);
  726. check_read(read_len, sizeof pheader);
  727. if (pheader.p_type == PT_LOAD)
  728. {
  729. if (pheader.p_filesz > pheader.p_memsz)
  730. {
  731. LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
  732. return -RT_ERROR;
  733. }
  734. check_off(pheader.p_offset, len);
  735. lseek(fd, pheader.p_offset, SEEK_SET);
  736. #ifdef ARCH_MM_MMU
  737. {
  738. uint32_t size = pheader.p_filesz;
  739. size_t tmp_len = 0;
  740. va = (void *)(pheader.p_vaddr + load_addr);
  741. read_len = 0;
  742. while (size)
  743. {
  744. pa = lwp_v2p(lwp, va);
  745. va_self = (void *)((char *)pa - PV_OFFSET);
  746. LOG_D("va_self = %p pa = %p", va_self, pa);
  747. tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
  748. tmp_len = load_fread(va_self, 1, tmp_len, fd);
  749. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
  750. read_len += tmp_len;
  751. size -= tmp_len;
  752. va = (void *)((char *)va + ARCH_PAGE_SIZE);
  753. }
  754. }
  755. #else
  756. read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
  757. #endif
  758. check_read(read_len, pheader.p_filesz);
  759. if (pheader.p_filesz < pheader.p_memsz)
  760. {
  761. #ifdef ARCH_MM_MMU
  762. uint32_t size = pheader.p_memsz - pheader.p_filesz;
  763. uint32_t size_s;
  764. uint32_t off;
  765. off = pheader.p_filesz & ARCH_PAGE_MASK;
  766. va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
  767. while (size)
  768. {
  769. size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
  770. pa = lwp_v2p(lwp, va);
  771. va_self = (void *)((char *)pa - PV_OFFSET);
  772. memset((void *)((char *)va_self + off), 0, size_s);
  773. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
  774. off = 0;
  775. size -= size_s;
  776. va = (void *)((char *)va + ARCH_PAGE_SIZE);
  777. }
  778. #else
  779. memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
  780. #endif
  781. }
  782. }
  783. }
  784. /* relocate */
  785. if (eheader.e_type == ET_DYN)
  786. {
  787. /* section info */
  788. off = eheader.e_shoff;
  789. /* find section string table */
  790. check_off(off, len);
  791. lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
  792. read_len = load_fread(&sheader, 1, sizeof sheader, fd);
  793. check_read(read_len, sizeof sheader);
  794. p_section_str = (char *)rt_malloc(sheader.sh_size);
  795. if (!p_section_str)
  796. {
  797. LOG_E("out of memory!");
  798. result = -ENOMEM;
  799. goto _exit;
  800. }
  801. check_off(sheader.sh_offset, len);
  802. lseek(fd, sheader.sh_offset, SEEK_SET);
  803. read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
  804. check_read(read_len, sheader.sh_size);
  805. check_off(off, len);
  806. lseek(fd, off, SEEK_SET);
  807. for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
  808. {
  809. read_len = load_fread(&sheader, 1, sizeof sheader, fd);
  810. check_read(read_len, sizeof sheader);
  811. if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
  812. {
  813. got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
  814. got_size = (size_t)sheader.sh_size;
  815. }
  816. else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
  817. {
  818. rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
  819. rel_dyn_size = (size_t)sheader.sh_size;
  820. }
  821. else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
  822. {
  823. dynsym_off = (size_t)sheader.sh_offset;
  824. dynsym_size = (size_t)sheader.sh_size;
  825. }
  826. }
  827. /* reloc */
  828. if (dynsym_size)
  829. {
  830. dynsym = rt_malloc(dynsym_size);
  831. if (!dynsym)
  832. {
  833. LOG_E("ERROR: Malloc error!");
  834. result = -ENOMEM;
  835. goto _exit;
  836. }
  837. check_off(dynsym_off, len);
  838. lseek(fd, dynsym_off, SEEK_SET);
  839. read_len = load_fread(dynsym, 1, dynsym_size, fd);
  840. check_read(read_len, dynsym_size);
  841. }
  842. #ifdef ARCH_MM_MMU
  843. arch_elf_reloc(lwp->aspace, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
  844. #else
  845. arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
  846. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
  847. rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
  848. #endif
  849. }
  850. LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
  851. LOG_D("lwp->text_size = 0x%p", lwp->text_size);
  852. _exit:
  853. if (dynsym)
  854. {
  855. rt_free(dynsym);
  856. }
  857. if (p_section_str)
  858. {
  859. rt_free(p_section_str);
  860. }
  861. if (result != RT_EOK)
  862. {
  863. LOG_E("lwp load faild, %d", result);
  864. }
  865. return result;
  866. }
  867. #endif /* ARCH_MM_MMU */
  868. rt_weak int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  869. {
  870. uint8_t *ptr;
  871. int ret = -1;
  872. int len;
  873. int fd = -1;
  874. /* check file name */
  875. RT_ASSERT(filename != RT_NULL);
  876. /* check lwp control block */
  877. RT_ASSERT(lwp != RT_NULL);
  878. /* copy file name to process name */
  879. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  880. if (load_addr != RT_NULL)
  881. {
  882. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  883. ptr = load_addr;
  884. }
  885. else
  886. {
  887. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  888. ptr = RT_NULL;
  889. }
  890. fd = open(filename, O_BINARY | O_RDONLY, 0);
  891. if (fd < 0)
  892. {
  893. LOG_E("ERROR: Can't open elf file %s!", filename);
  894. goto out;
  895. }
  896. len = lseek(fd, 0, SEEK_END);
  897. if (len < 0)
  898. {
  899. LOG_E("ERROR: File %s size error!", filename);
  900. goto out;
  901. }
  902. lseek(fd, 0, SEEK_SET);
  903. ret = load_elf(fd, len, lwp, ptr, aux);
  904. if ((ret != RT_EOK) && (ret != 1))
  905. {
  906. LOG_E("lwp load ret = %d", ret);
  907. }
  908. out:
  909. if (fd > 0)
  910. {
  911. close(fd);
  912. }
  913. return ret;
  914. }
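/*
 * Caller sketch, mirroring lwp_execve() below (the path is hypothetical):
 * RT_EOK means a statically linked image was loaded, while a return value
 * of 1 means the image is dynamic and ld.so must be loaded instead.
 *
 *     int r = lwp_load("/bin/app", lwp, RT_NULL, 0, aux);
 *     if (r == 1)
 *         r = load_ldso(lwp, "/bin/app", argv, envp);
 */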
  915. /* lwp thread clean up */
  916. void lwp_cleanup(struct rt_thread *tid)
  917. {
  918. rt_base_t level;
  919. struct rt_lwp *lwp;
  920. if (tid == NULL)
  921. {
  922. LOG_I("%s: invalid parameter tid == NULL", __func__);
  923. return;
  924. }
  925. else
  926. LOG_D("cleanup thread: %s, stack_addr: 0x%x", tid->parent.name, tid->stack_addr);
  927. level = rt_hw_interrupt_disable();
  928. lwp = (struct rt_lwp *)tid->lwp;
  929. /* lwp thread cleanup */
  930. lwp_tid_put(tid->tid);
  931. rt_list_remove(&tid->sibling);
  932. lwp_thread_signal_detach(&tid->signal);
  933. rt_hw_interrupt_enable(level);
  934. /* tty will be released in lwp_ref_dec() if ref is cleared */
  935. lwp_ref_dec(lwp);
  936. return;
  937. }
  938. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  939. {
  940. struct dfs_file *d;
  941. struct dfs_fdtable *lwp_fdt;
  942. lwp_fdt = &lwp->fdt;
  943. /* init 4 fds */
  944. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  945. if (lwp_fdt->fds)
  946. {
  947. lwp_fdt->maxfd = 4;
  948. d = fd_get(0);
  949. fd_associate(lwp_fdt, 0, d);
  950. d = fd_get(1);
  951. fd_associate(lwp_fdt, 1, d);
  952. d = fd_get(2);
  953. fd_associate(lwp_fdt, 2, d);
  954. }
  955. return;
  956. }
  957. static void _lwp_thread_entry(void *parameter)
  958. {
  959. rt_thread_t tid;
  960. struct rt_lwp *lwp;
  961. tid = rt_thread_self();
  962. lwp = (struct rt_lwp *)tid->lwp;
  963. tid->cleanup = lwp_cleanup;
  964. tid->user_stack = RT_NULL;
  965. if (lwp->debug)
  966. {
  967. lwp->bak_first_ins = *(uint32_t *)lwp->text_entry;
  968. *(uint32_t *)lwp->text_entry = dbg_get_ins();
  969. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
  970. icache_invalid_all();
  971. }
  972. /**
  973. * Without ASID support, there is a special case: when an application is run
  974. * and exited repeatedly, the same page frame may be reallocated and bound to
  975. * a different text segment. The icache then holds stale data and must be
  976. * handled by the running core itself.
  977. * With ASID support this is rare, since both the ASID and the page frame
  978. * would have to be identical to the previously running application.
  979. *
  980. * For a newly loaded application the icache is effectively empty, so there
  981. * should be nothing in it to match. This invalidation therefore has barely
  982. * any impact.
  983. */
  984. rt_hw_icache_invalidate_all();
  985. #ifdef ARCH_MM_MMU
  986. arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, (char *)tid->stack_addr + tid->stack_size);
  987. #else
  988. arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
  989. #endif /* ARCH_MM_MMU */
  990. }
  991. struct rt_lwp *lwp_self(void)
  992. {
  993. rt_thread_t tid;
  994. tid = rt_thread_self();
  995. if (tid)
  996. {
  997. return (struct rt_lwp *)tid->lwp;
  998. }
  999. return RT_NULL;
  1000. }
  1001. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  1002. {
  1003. int result;
  1004. rt_base_t level;
  1005. struct rt_lwp *lwp;
  1006. char *thread_name;
  1007. char *argv_last = argv[argc - 1];
  1008. int bg = 0;
  1009. struct process_aux *aux;
  1010. int tid = 0;
  1011. int ret;
  1012. if (filename == RT_NULL)
  1013. {
  1014. return -RT_ERROR;
  1015. }
  1016. if (access(filename, X_OK) != 0)
  1017. {
  1018. return -EACCES;
  1019. }
  1020. lwp = lwp_new();
  1021. if (lwp == RT_NULL)
  1022. {
  1023. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  1024. return -RT_ENOMEM;
  1025. }
  1026. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  1027. if ((tid = lwp_tid_get()) == 0)
  1028. {
  1029. lwp_ref_dec(lwp);
  1030. return -ENOMEM;
  1031. }
  1032. #ifdef ARCH_MM_MMU
  1033. if (lwp_user_space_init(lwp, 0) != 0)
  1034. {
  1035. lwp_tid_put(tid);
  1036. lwp_ref_dec(lwp);
  1037. return -ENOMEM;
  1038. }
  1039. #endif
  1040. if (argv_last[0] == '&' && argv_last[1] == '\0')
  1041. {
  1042. argc--;
  1043. bg = 1;
  1044. }
  1045. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  1046. {
  1047. lwp_tid_put(tid);
  1048. lwp_ref_dec(lwp);
  1049. return -ENOMEM;
  1050. }
  1051. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  1052. #ifdef ARCH_MM_MMU
  1053. if (result == 1)
  1054. {
  1055. /* dynamic */
  1056. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  1057. result = load_ldso(lwp, filename, argv, envp);
  1058. }
  1059. #endif /* ARCH_MM_MMU */
  1060. if (result == RT_EOK)
  1061. {
  1062. rt_thread_t thread = RT_NULL;
  1063. rt_uint32_t priority = 25, tick = 200;
  1064. lwp_copy_stdio_fdt(lwp);
  1065. /* obtain the base name */
  1066. thread_name = strrchr(filename, '/');
  1067. thread_name = thread_name ? thread_name + 1 : filename;
  1068. #ifndef ARCH_MM_MMU
  1069. struct lwp_app_head *app_head = lwp->text_entry;
  1070. if (app_head->priority)
  1071. {
  1072. priority = app_head->priority;
  1073. }
  1074. if (app_head->tick)
  1075. {
  1076. tick = app_head->tick;
  1077. }
  1078. #endif /* not defined ARCH_MM_MMU */
  1079. thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
  1080. LWP_TASK_STACK_SIZE, priority, tick);
  1081. if (thread != RT_NULL)
  1082. {
  1083. struct rt_lwp *self_lwp;
  1084. thread->tid = tid;
  1085. lwp_tid_set_thread(tid, thread);
  1086. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
  1087. (rt_size_t)thread->stack_addr + thread->stack_size);
  1088. level = rt_hw_interrupt_disable();
  1089. self_lwp = lwp_self();
  1090. if (self_lwp)
  1091. {
  1092. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1093. lwp->__pgrp = tid;
  1094. lwp->session = self_lwp->session;
  1095. /* lwp add to children link */
  1096. lwp->sibling = self_lwp->first_child;
  1097. self_lwp->first_child = lwp;
  1098. lwp->parent = self_lwp;
  1099. }
  1100. else
  1101. {
  1102. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1103. lwp->__pgrp = tid;
  1104. }
  1105. if (!bg)
  1106. {
  1107. if (lwp->session == -1)
  1108. {
  1109. struct tty_struct *tty = RT_NULL;
  1110. struct rt_lwp *old_lwp;
  1111. tty = (struct tty_struct *)console_tty_get();
  1112. old_lwp = tty->foreground;
  1113. if (old_lwp)
  1114. {
  1115. rt_mutex_take(&tty->lock, RT_WAITING_FOREVER);
  1116. ret = tty_push(&tty->head, old_lwp);
  1117. rt_mutex_release(&tty->lock);
  1118. if (ret < 0)
  1119. {
  1120. lwp_tid_put(tid);
  1121. lwp_ref_dec(lwp);
  1122. LOG_E("malloc fail!\n");
  1123. return -ENOMEM;
  1124. }
  1125. }
  1126. lwp->tty = tty;
  1127. lwp->tty->pgrp = lwp->__pgrp;
  1128. lwp->tty->session = lwp->session;
  1129. lwp->tty->foreground = lwp;
  1130. tcgetattr(1, &stdin_termios);
  1131. old_stdin_termios = stdin_termios;
  1132. stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
  1133. tcsetattr(1, 0, &stdin_termios);
  1134. }
  1135. else
  1136. {
  1137. if (self_lwp != RT_NULL)
  1138. {
  1139. rt_mutex_take(&self_lwp->tty->lock, RT_WAITING_FOREVER);
  1140. ret = tty_push(&self_lwp->tty->head, self_lwp);
  1141. rt_mutex_release(&self_lwp->tty->lock);
  1142. if (ret < 0)
  1143. {
  1144. lwp_tid_put(tid);
  1145. lwp_ref_dec(lwp);
  1146. LOG_E("malloc fail!\n");
  1147. return -ENOMEM;
  1148. }
  1149. lwp->tty = self_lwp->tty;
  1150. lwp->tty->pgrp = lwp->__pgrp;
  1151. lwp->tty->session = lwp->session;
  1152. lwp->tty->foreground = lwp;
  1153. }
  1154. else
  1155. {
  1156. lwp->tty = RT_NULL;
  1157. }
  1158. }
  1159. }
  1160. else
  1161. {
  1162. lwp->background = RT_TRUE;
  1163. }
  1164. thread->lwp = lwp;
  1165. #ifndef ARCH_MM_MMU
  1166. struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
  1167. thread->user_stack = app_head->stack_offset ?
  1168. (void *)(app_head->stack_offset -
  1169. app_head->data_offset +
  1170. (uint32_t)lwp->data_entry) : RT_NULL;
  1171. thread->user_stack_size = app_head->stack_size;
  1172. /* init data area */
  1173. rt_memset(lwp->data_entry, 0, lwp->data_size);
  1174. /* init user stack */
  1175. rt_memset(thread->user_stack, '#', thread->user_stack_size);
  1176. #endif /* not defined ARCH_MM_MMU */
  1177. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  1178. if (debug && rt_dbg_ops)
  1179. {
  1180. lwp->debug = debug;
  1181. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
  1182. }
  1183. rt_hw_interrupt_enable(level);
  1184. rt_thread_startup(thread);
  1185. return lwp_to_pid(lwp);
  1186. }
  1187. }
  1188. lwp_tid_put(tid);
  1189. lwp_ref_dec(lwp);
  1190. return -RT_ERROR;
  1191. }
  1192. #ifdef RT_USING_MUSL
  1193. extern char **__environ;
  1194. #else
  1195. char **__environ = 0;
  1196. #endif
  1197. pid_t exec(char *filename, int debug, int argc, char **argv)
  1198. {
  1199. setenv("OS", "RT-Thread", 1);
  1200. return lwp_execve(filename, debug, argc, argv, __environ);
  1201. }
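/*
 * Minimal usage sketch for exec(), with a hypothetical application path:
 *
 *     char *argv[] = { "/bin/hello", "world" };
 *     pid_t pid = exec("/bin/hello", 0, 2, argv);
 *     if (pid < 0)
 *         rt_kprintf("exec failed: %d\n", (int)pid);
 */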
  1202. #ifdef ARCH_MM_MMU
  1203. void lwp_user_setting_save(rt_thread_t thread)
  1204. {
  1205. if (thread)
  1206. {
  1207. thread->thread_idr = arch_get_tidr();
  1208. }
  1209. }
  1210. void lwp_user_setting_restore(rt_thread_t thread)
  1211. {
  1212. if (!thread)
  1213. {
  1214. return;
  1215. }
  1216. #if !defined(ARCH_RISCV64)
  1217. /* tidr will be set in RESTORE_ALL in risc-v */
  1218. arch_set_tidr(thread->thread_idr);
  1219. #endif
  1220. if (rt_dbg_ops)
  1221. {
  1222. struct rt_lwp *l = (struct rt_lwp *)thread->lwp;
  1223. if (l != 0)
  1224. {
  1225. rt_hw_set_process_id((size_t)l->pid);
  1226. }
  1227. else
  1228. {
  1229. rt_hw_set_process_id(0);
  1230. }
  1231. if (l && l->debug)
  1232. {
  1233. uint32_t step_type = 0;
  1234. step_type = dbg_step_type();
  1235. if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
  1236. {
  1237. dbg_activate_step();
  1238. }
  1239. else
  1240. {
  1241. dbg_deactivate_step();
  1242. }
  1243. }
  1244. }
  1245. }
  1246. #endif /* ARCH_MM_MMU */