lwp.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
  9. * 2018-11-02 heyuanjie fix complie error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. * 2023-02-20 wangxiaoyao inv icache before new app startup
  13. * 2023-02-20 wangxiaoyao fix bug on foreground app switch
  14. * 2023-10-16 Shell Support a new backtrace framework
  15. */
  16. #define DBG_TAG "lwp"
  17. #define DBG_LVL DBG_WARNING
  18. #include <rtdbg.h>
  19. #include <rthw.h>
  20. #include <rtthread.h>
  21. #include <dfs_file.h>
  22. #include <unistd.h>
  23. #include <stdio.h> /* rename() */
  24. #include <fcntl.h>
  25. #include <sys/stat.h>
  26. #include <sys/statfs.h> /* statfs() */
  27. #include <lwp_elf.h>
  28. #ifndef RT_USING_DFS
  29. #error "lwp need file system(RT_USING_DFS)"
  30. #endif
  31. #include "lwp_internal.h"
  32. #include "lwp_arch.h"
  33. #include "lwp_arch_comm.h"
  34. #include "lwp_signal.h"
  35. #include "lwp_dbg.h"
  36. #include "console.h"
  37. #ifdef ARCH_MM_MMU
  38. #include <lwp_user_mm.h>
  39. #endif /* end of ARCH_MM_MMU */
  40. #ifndef O_DIRECTORY
  41. #define O_DIRECTORY 0x200000
  42. #endif
  43. #ifndef O_BINARY
  44. #define O_BINARY 0x10000
  45. #endif
/* First four bytes of every ELF image: 0x7f 'E' 'L' 'F' */
static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
#ifdef DFS_USING_WORKDIR
/* kernel-wide working directory, owned by the DFS component */
extern char working_directory[];
#endif
/* console termios state; old_stdin_termios keeps the settings saved before
 * an lwp took over the console (returned by get_old_termios()) */
static struct termios stdin_termios, old_stdin_termios;
/* defined elsewhere: loads the dynamic linker for PT_DYNAMIC executables */
int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
  52. struct termios *get_old_termios(void)
  53. {
  54. return &old_stdin_termios;
  55. }
  56. int lwp_component_init(void)
  57. {
  58. int rc;
  59. if ((rc = lwp_tid_init()) != RT_EOK)
  60. {
  61. LOG_E("%s: lwp_component_init() failed", __func__);
  62. }
  63. else if ((rc = lwp_pid_init()) != RT_EOK)
  64. {
  65. LOG_E("%s: lwp_pid_init() failed", __func__);
  66. }
  67. else if ((rc = rt_channel_component_init()) != RT_EOK)
  68. {
  69. LOG_E("%s: rt_channel_component_init failed", __func__);
  70. }
  71. return rc;
  72. }
  73. INIT_COMPONENT_EXPORT(lwp_component_init);
  74. void lwp_setcwd(char *buf)
  75. {
  76. struct rt_lwp *lwp = RT_NULL;
  77. if(strlen(buf) >= DFS_PATH_MAX)
  78. {
  79. rt_kprintf("buf too long!\n");
  80. return ;
  81. }
  82. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  83. if (lwp)
  84. {
  85. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
  86. }
  87. else
  88. {
  89. rt_strncpy(working_directory, buf, DFS_PATH_MAX);
  90. }
  91. return ;
  92. }
  93. char *lwp_getcwd(void)
  94. {
  95. char *dir_buf = RT_NULL;
  96. struct rt_lwp *lwp = RT_NULL;
  97. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  98. if (lwp)
  99. {
  100. if(lwp->working_directory[0] != '/')
  101. {
  102. dir_buf = &working_directory[0];
  103. }
  104. else
  105. {
  106. dir_buf = &lwp->working_directory[0];
  107. }
  108. }
  109. else
  110. dir_buf = &working_directory[0];
  111. return dir_buf;
  112. }
  113. /**
  114. * RT-Thread light-weight process
  115. */
  116. void lwp_set_kernel_sp(uint32_t *sp)
  117. {
  118. rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
  119. }
/**
 * Return the kernel stack pointer of the current thread.
 *
 * With an MMU this is simply the thread's saved sp.  Without one, a
 * pending context switch means rt_thread_self() may not identify the
 * thread being switched from, so the from-thread is recovered from
 * rt_interrupt_from_thread instead.
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef ARCH_MM_MMU
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;
    if (rt_thread_switch_interrupt_flag)
    {
        /* a switch is pending: rt_interrupt_from_thread holds the address of
         * the outgoing thread's sp field; recover the TCB from it */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
  139. #ifdef ARCH_MM_MMU
/**
 * Copy argc/argv/envp plus the process_aux vector onto the top of the
 * user stack of @lwp (MMU build).
 *
 * Layout written at the mapped page: argc, argv[0..argc-1], NULL,
 * envp[...], NULL, aux items, then the string data.  User-visible
 * pointers (stored into the vector) use the user virtual address @args,
 * while the actual writes go through the kernel alias @args_k obtained
 * via lwp_v2p() - PV_OFFSET.
 *
 * @return pointer (kernel alias) to the aux vector, or RT_NULL if the
 *         total size exceeds one page or mapping fails.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;       /* user-space cursor for string data */
    char *str_k;     /* kernel-alias cursor for the same bytes */
    char **new_argve;
    int i;
    int len;
    size_t *args_k;  /* kernel alias of args */
    struct process_aux *aux;
    size_t prot = PROT_READ | PROT_WRITE;
    size_t flags = MAP_FIXED | MAP_PRIVATE;
    size_t zero = 0;

    /* account for every argv string (NUL included) plus its pointer slot */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);
    /* likewise for envp; i ends up holding the env entry count */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }
    /* for aux */
    size += sizeof(struct process_aux);
    /* everything must fit into the single page mapped below */
    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }

    /* map one page at the top of the user stack and touch it */
    args = lwp_mmap2(lwp, (void *)(USER_STACK_VEND), size, prot, flags, -1, 0);
    if (args == RT_NULL || lwp_data_put(lwp, args, &zero, sizeof(zero)) != sizeof(zero))
    {
        return RT_NULL;
    }

    /* kernel-visible alias of the user page */
    args_k = (size_t *)lwp_v2p(lwp, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);

    /* argc, argv[], 0, envp[], 0 , aux[] */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));

    new_argve = (char **)&args_k[1];
    args_k[0] = argc;

    /* copy argv strings; the stored pointers are user addresses */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;   /* argv terminator */
    i++;
    new_argve[i] = 0;   /* envp terminator when envp is absent */

    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;   /* envp terminator */
    }
    i++;

    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    /* make the vector visible to the user mapping */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);

    lwp->args = args;

    return aux;
}
  223. #else
/**
 * Copy argc/argv/envp (and aux when ARCH_MM_MMU) into a kernel-heap
 * buffer for the new process (non-MMU build: no address translation,
 * everything is written directly).
 *
 * @return pointer to the aux vector (or its would-be location without
 *         MMU), RT_NULL on allocation failure.
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;

    /* account for every argv string (NUL included) plus its pointer slot */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);

    /* likewise for envp; i ends up holding the env entry count */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }

#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);

    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */

    new_argve = (char **)&args[1];
    args[0] = argc;

    /* copy argv strings right behind the pointer vector */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;   /* argv terminator */
    i++;
    new_argve[i] = 0;   /* envp terminator when envp is absent */

    if (envp)
    {
        int j;

        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;   /* envp terminator */
    }
#ifdef ARCH_MM_MMU
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    lwp->args = args;

    return aux;
#else
    lwp->args = args;
    lwp->args_length = size;  /* remembered so the buffer can be freed later */

    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
  310. #endif
  311. #ifdef ARCH_MM_MMU
/* Abort load_elf() with -RT_ERROR when a file offset lies beyond the file
 * length.  Relies on a local `result` variable and an `_exit` label in the
 * enclosing function. */
#define check_off(voff, vlen) \
    do \
    { \
        if (voff > vlen) \
        { \
            result = -RT_ERROR; \
            goto _exit; \
        } \
    } while (0)

/* Abort load_elf() with -RT_ERROR when fewer bytes than requested were
 * actually read.  Same `result`/`_exit` contract as check_off(). */
#define check_read(vrlen, vrlen_want) \
    do \
    { \
        if (vrlen < vrlen_want) \
        { \
            result = -RT_ERROR; \
            goto _exit; \
        } \
    } while (0)
  330. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  331. {
  332. size_t read_block = 0;
  333. while (nmemb)
  334. {
  335. size_t count;
  336. count = read(fd, ptr, size * nmemb) / size;
  337. if (count < nmemb)
  338. {
  339. LOG_E("ERROR: file size error!");
  340. break;
  341. }
  342. ptr = (void *)((uint8_t *)ptr + (count * size));
  343. nmemb -= count;
  344. read_block += count;
  345. }
  346. return read_block;
  347. }
/* Symbol-table entry, laid out to match the on-disk ELF symbol record
 * (width of the fields follows the Elf_* typedefs from lwp_elf.h). */
typedef struct
{
    Elf_Word st_name;        /* offset into the string table */
    Elf_Addr st_value;       /* symbol value (address or offset) */
    Elf_Word st_size;        /* object size in bytes */
    unsigned char st_info;   /* type and binding */
    unsigned char st_other;  /* visibility */
    Elf_Half st_shndx;       /* defining section index */
} Elf_sym;
  357. #ifdef ARCH_MM_MMU
  358. struct map_range
  359. {
  360. void *start;
  361. size_t size;
  362. };
  363. static void expand_map_range(struct map_range *m, void *start, size_t size)
  364. {
  365. if (!m->start)
  366. {
  367. m->start = start;
  368. m->size = size;
  369. }
  370. else
  371. {
  372. void *end = (void *)((char*)start + size);
  373. void *mend = (void *)((char*)m->start + m->size);
  374. if (m->start > start)
  375. {
  376. m->start = start;
  377. }
  378. if (mend < end)
  379. {
  380. mend = end;
  381. }
  382. m->size = (char *)mend - (char *)m->start;
  383. }
  384. }
/**
 * Validate the text (@m1) and data (@m2) ranges before mapping: each
 * non-empty range, rounded out to page boundaries, must lie within
 * [USER_LOAD_VADDR, USER_STACK_VSTART], and the two page-rounded ranges
 * must not overlap.
 *
 * NOTE(review): "ckeck" is a typo for "check"; the name is kept because
 * callers use it.
 *
 * @return 0 when the ranges are acceptable, -1 otherwise.
 */
static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
{
    /* round start down and end up to whole pages */
    void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
    void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
    void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
    void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);

    if (m1->size)
    {
        /* text must sit entirely inside the user load window */
        if (m1_start < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m1_start > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
        if (m1_end < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m1_end > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
    }
    if (m2->size)
    {
        /* same bounds for the data range */
        if (m2_start < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m2_start > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
        if (m2_end < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m2_end > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
    }
    if ((m1->size != 0) && (m2->size != 0))
    {
        /* page-rounded text and data must not overlap */
        if (m1_start < m2_start)
        {
            if (m1_end > m2_start)
            {
                return -1;
            }
        }
        else /* m2_start <= m1_start */
        {
            if (m2_end > m1_start)
            {
                return -1;
            }
        }
    }
    return 0;
}
  448. #endif
/**
 * Load an ELF image from open descriptor @fd (file length @len) into
 * process @lwp.
 *
 * @param load_addr fixed load base, or NULL to let the loader pick the
 *                  mapping (MMU) / allocate from the heap (non-MMU)
 * @param aux       aux vector to fill in (AT_PAGESZ, AT_RANDOM,
 *                  AT_PHDR, AT_PHNUM, AT_PHENT)
 *
 * @return RT_EOK on success, 1 when the image needs the dynamic linker
 *         (PT_DYNAMIC present, MMU build), negative error otherwise.
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;           /* base added to all file addresses */
    char *p_section_str = 0;       /* section-name string table (heap) */
    Elf_sym *dynsym = 0;           /* .dynsym copy (heap) */
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef ARCH_MM_MMU
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;
#endif

    if (len < sizeof eheader)
    {
        LOG_E("len < sizeof eheader!");
        return -RT_ERROR;
    }

    /* check the ELF magic before reading the full header */
    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        LOG_E("elf_magic not same, magic:0x%x!", magic);
        return -RT_ERROR;
    }

    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);

    /* e_ident[4] is EI_CLASS: must match the build's word size */
#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#endif
    /* e_ident[6] is EI_VERSION */
    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        LOG_E("elf Version not 1,ver:%d!", eheader.e_ident[6]);
        return -RT_ERROR;
    }
    if ((eheader.e_type != ET_DYN)
#ifdef ARCH_MM_MMU
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
        return -RT_ERROR;
    }

#ifdef ARCH_MM_MMU
    {
        /* a PT_DYNAMIC segment means the image needs ld.so; defer to it */
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);
            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif

    /* only the two well-known entry addresses are accepted */
    if (eheader.e_entry != 0)
    {
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalidate */
            LOG_E("elf entry is invalidate, entry:0x%x!", eheader.e_entry);
            return -RT_ERROR;
        }
    }

    { /* load aux */
        uint8_t *process_header;
        size_t process_header_size;

        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef ARCH_MM_MMU
        /* the program headers are placed into a dedicated user page;
         * 16 bytes at the end are reserved for the AT_RANDOM seed */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            LOG_E("process_header_size too big, size:0x%x!", process_header_size);
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            LOG_E("lwp map user failed!");
            return -RT_ERROR;
        }
        pa = lwp_v2p(lwp, va);
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif

        aux->item[1].key = AT_PAGESZ;
#ifdef ARCH_MM_MMU
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif
        aux->item[2].key = AT_RANDOM;
        {
            /* NOTE(review): rt_tick_get() is a weak entropy source for
             * AT_RANDOM; acceptable here since it only seeds libc */
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef ARCH_MM_MMU
            uint8_t *krandom;

            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)lwp_v2p(lwp, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }
        aux->item[3].key = AT_PHDR;
#ifdef ARCH_MM_MMU
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }

    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef ARCH_MM_MMU
    else
    {
        /* map user: derive the text/data spans from the SHF_ALLOC sections */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                /* read-only PROGBITS -> text; writable -> data */
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }

        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }

        if (user_area[0].start == NULL)
        {
            /* DYN: file addresses start at 0, rebase to USER_LOAD_VADDR */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }

        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }

        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        /* no MMU: compute the hull of all loadable sections and allocate
         * a single heap block to hold the image */
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;

        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }

        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);

    /* copy every PT_LOAD segment and zero-fill the memsz tail */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);

        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
                return -RT_ERROR;
            }

            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef ARCH_MM_MMU
            {
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;

                /* NOTE(review): the copy base uses load_addr while the
                 * zero-fill below uses load_off; for a DYN image with
                 * load_addr == NULL these differ — confirm whether this
                 * should be pheader.p_vaddr + load_off */
                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                while (size)
                {
                    /* copy page by page through the kernel alias */
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);

            if (pheader.p_filesz < pheader.p_memsz)
            {
                /* BSS-style tail: zero [p_filesz, p_memsz) */
#ifdef ARCH_MM_MMU
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;

                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }

    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);
        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }
        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);

        /* locate .got / .rel.dyn / .dynsym by name */
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }
        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef ARCH_MM_MMU
        arch_elf_reloc(lwp->aspace, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);

        /* make the patched text visible to the instruction fetch path */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);

_exit:
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  888. #endif /* ARCH_MM_MMU */
  889. rt_weak int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  890. {
  891. uint8_t *ptr;
  892. int ret = -1;
  893. int len;
  894. int fd = -1;
  895. /* check file name */
  896. RT_ASSERT(filename != RT_NULL);
  897. /* check lwp control block */
  898. RT_ASSERT(lwp != RT_NULL);
  899. /* copy file name to process name */
  900. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  901. if (load_addr != RT_NULL)
  902. {
  903. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  904. ptr = load_addr;
  905. }
  906. else
  907. {
  908. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  909. ptr = RT_NULL;
  910. }
  911. fd = open(filename, O_BINARY | O_RDONLY, 0);
  912. if (fd < 0)
  913. {
  914. LOG_E("ERROR: Can't open elf file %s!", filename);
  915. goto out;
  916. }
  917. len = lseek(fd, 0, SEEK_END);
  918. if (len < 0)
  919. {
  920. LOG_E("ERROR: File %s size error!", filename);
  921. goto out;
  922. }
  923. lseek(fd, 0, SEEK_SET);
  924. ret = load_elf(fd, len, lwp, ptr, aux);
  925. if ((ret != RT_EOK) && (ret != 1))
  926. {
  927. LOG_E("lwp load ret = %d", ret);
  928. }
  929. out:
  930. if (fd > 0)
  931. {
  932. close(fd);
  933. }
  934. return ret;
  935. }
  936. /* lwp-thread clean up routine */
  937. void lwp_cleanup(struct rt_thread *tid)
  938. {
  939. struct rt_lwp *lwp;
  940. if (tid == NULL)
  941. {
  942. LOG_I("%s: invalid parameter tid == NULL", __func__);
  943. return;
  944. }
  945. else
  946. LOG_D("cleanup thread: %s, stack_addr: 0x%x", tid->parent.name, tid->stack_addr);
  947. /**
  948. * Brief: lwp thread cleanup
  949. *
  950. * Note: Critical Section
  951. * - thread control block (RW. It's ensured that no one else can access tcb
  952. * other than itself)
  953. */
  954. lwp = (struct rt_lwp *)tid->lwp;
  955. lwp_thread_signal_detach(&tid->signal);
  956. /* tty will be release in lwp_ref_dec() if ref is cleared */
  957. lwp_ref_dec(lwp);
  958. return;
  959. }
  960. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  961. {
  962. struct dfs_file *d;
  963. struct dfs_fdtable *lwp_fdt;
  964. lwp_fdt = &lwp->fdt;
  965. /* init 4 fds */
  966. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  967. if (lwp_fdt->fds)
  968. {
  969. lwp_fdt->maxfd = 4;
  970. d = fd_get(0);
  971. fdt_fd_associate_file(lwp_fdt, 0, d);
  972. d = fd_get(1);
  973. fdt_fd_associate_file(lwp_fdt, 1, d);
  974. d = fd_get(2);
  975. fdt_fd_associate_file(lwp_fdt, 2, d);
  976. }
  977. return;
  978. }
/* Kernel-side entry of a user process' first thread: installs the cleanup
 * handler, arms the debugger breakpoint when requested, then drops to
 * user mode at the process entry point. Never returns. */
static void _lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;
    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    /* lwp_cleanup() will run when this thread exits */
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;
    if (lwp->debug)
    {
        /* save the first instruction, then patch a breakpoint instruction
         * over the entry point so the debugger gains control immediately */
        lwp->bak_first_inst = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        /* flush dcache / invalidate icache so the patched word is fetched */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }
    /**
     * without ASID support, it will be a special case when trying to run application
     * and exit multiple times and a same page frame allocated to it bound to
     * different text segment. Then we are in a situation where icache contains
     * out-of-dated data and must be handle by the running core itself.
     * with ASID support, this should be a rare case that ASID & page frame both
     * identical to previous running application.
     *
     * For a new application loaded into memory, icache are seen as empty. And there
     * should be nothing in the icache entry to match. So this icache invalidation
     * operation should have barely influence.
     */
    rt_hw_icache_invalidate_all();
#ifdef ARCH_MM_MMU
    /* MMU build: user stack sits at the top of the user address space */
    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, (char *)tid->stack_addr + tid->stack_size);
#else
    /* no-MMU build: stack is carved out of the process data segment */
    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
  1013. struct rt_lwp *lwp_self(void)
  1014. {
  1015. rt_thread_t tid;
  1016. tid = rt_thread_self();
  1017. if (tid)
  1018. {
  1019. return (struct rt_lwp *)tid->lwp;
  1020. }
  1021. return RT_NULL;
  1022. }
  1023. rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child)
  1024. {
  1025. /* lwp add to children link */
  1026. LWP_LOCK(parent);
  1027. child->sibling = parent->first_child;
  1028. parent->first_child = child;
  1029. child->parent = parent;
  1030. LWP_UNLOCK(parent);
  1031. LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);
  1032. /* parent holds reference to child */
  1033. lwp_ref_inc(parent);
  1034. /* child holds reference to parent */
  1035. lwp_ref_inc(child);
  1036. return 0;
  1037. }
  1038. rt_err_t lwp_children_unregister(struct rt_lwp *parent, struct rt_lwp *child)
  1039. {
  1040. struct rt_lwp **lwp_node;
  1041. LWP_LOCK(parent);
  1042. /* detach from children link */
  1043. lwp_node = &parent->first_child;
  1044. while (*lwp_node != child)
  1045. {
  1046. RT_ASSERT(*lwp_node != RT_NULL);
  1047. lwp_node = &(*lwp_node)->sibling;
  1048. }
  1049. (*lwp_node) = child->sibling;
  1050. child->parent = RT_NULL;
  1051. LWP_UNLOCK(parent);
  1052. LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);
  1053. lwp_ref_dec(child);
  1054. lwp_ref_dec(parent);
  1055. return 0;
  1056. }
  1057. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  1058. {
  1059. int result;
  1060. struct rt_lwp *lwp;
  1061. char *thread_name;
  1062. char *argv_last = argv[argc - 1];
  1063. int bg = 0;
  1064. struct process_aux *aux;
  1065. int tid = 0;
  1066. int ret;
  1067. if (filename == RT_NULL)
  1068. {
  1069. return -EINVAL;
  1070. }
  1071. if (access(filename, X_OK) != 0)
  1072. {
  1073. return -EACCES;
  1074. }
  1075. lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID);
  1076. if (lwp == RT_NULL)
  1077. {
  1078. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  1079. return -ENOMEM;
  1080. }
  1081. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  1082. if ((tid = lwp_tid_get()) == 0)
  1083. {
  1084. lwp_ref_dec(lwp);
  1085. return -ENOMEM;
  1086. }
  1087. #ifdef ARCH_MM_MMU
  1088. if (lwp_user_space_init(lwp, 0) != 0)
  1089. {
  1090. lwp_tid_put(tid);
  1091. lwp_ref_dec(lwp);
  1092. return -ENOMEM;
  1093. }
  1094. #endif
  1095. if (argv_last[0] == '&' && argv_last[1] == '\0')
  1096. {
  1097. argc--;
  1098. bg = 1;
  1099. }
  1100. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  1101. {
  1102. lwp_tid_put(tid);
  1103. lwp_ref_dec(lwp);
  1104. return -ENOMEM;
  1105. }
  1106. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  1107. #ifdef ARCH_MM_MMU
  1108. if (result == 1)
  1109. {
  1110. /* dynmaic */
  1111. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  1112. result = load_ldso(lwp, filename, argv, envp);
  1113. }
  1114. #endif /* ARCH_MM_MMU */
  1115. if (result == RT_EOK)
  1116. {
  1117. rt_thread_t thread = RT_NULL;
  1118. rt_uint32_t priority = 25, tick = 200;
  1119. lwp_copy_stdio_fdt(lwp);
  1120. /* obtain the base name */
  1121. thread_name = strrchr(filename, '/');
  1122. thread_name = thread_name ? thread_name + 1 : filename;
  1123. #ifndef ARCH_MM_MMU
  1124. struct lwp_app_head *app_head = lwp->text_entry;
  1125. if (app_head->priority)
  1126. {
  1127. priority = app_head->priority;
  1128. }
  1129. if (app_head->tick)
  1130. {
  1131. tick = app_head->tick;
  1132. }
  1133. #endif /* not defined ARCH_MM_MMU */
  1134. thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
  1135. LWP_TASK_STACK_SIZE, priority, tick);
  1136. if (thread != RT_NULL)
  1137. {
  1138. struct rt_lwp *self_lwp;
  1139. thread->tid = tid;
  1140. lwp_tid_set_thread(tid, thread);
  1141. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
  1142. (rt_size_t)thread->stack_addr + thread->stack_size);
  1143. self_lwp = lwp_self();
  1144. if (self_lwp)
  1145. {
  1146. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1147. lwp->__pgrp = tid;
  1148. lwp->session = self_lwp->session;
  1149. /* lwp add to children link */
  1150. lwp_children_register(self_lwp, lwp);
  1151. }
  1152. else
  1153. {
  1154. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1155. lwp->__pgrp = tid;
  1156. }
  1157. if (!bg)
  1158. {
  1159. if (lwp->session == -1)
  1160. {
  1161. struct tty_struct *tty = RT_NULL;
  1162. struct rt_lwp *old_lwp;
  1163. tty = (struct tty_struct *)console_tty_get();
  1164. old_lwp = tty->foreground;
  1165. if (old_lwp)
  1166. {
  1167. rt_mutex_take(&tty->lock, RT_WAITING_FOREVER);
  1168. ret = tty_push(&tty->head, old_lwp);
  1169. rt_mutex_release(&tty->lock);
  1170. if (ret < 0)
  1171. {
  1172. lwp_tid_put(tid);
  1173. lwp_ref_dec(lwp);
  1174. LOG_E("malloc fail!\n");
  1175. return -ENOMEM;
  1176. }
  1177. }
  1178. lwp->tty = tty;
  1179. lwp->tty->pgrp = lwp->__pgrp;
  1180. lwp->tty->session = lwp->session;
  1181. lwp->tty->foreground = lwp;
  1182. tcgetattr(1, &stdin_termios);
  1183. old_stdin_termios = stdin_termios;
  1184. stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
  1185. tcsetattr(1, 0, &stdin_termios);
  1186. }
  1187. else
  1188. {
  1189. if (self_lwp != RT_NULL)
  1190. {
  1191. rt_mutex_take(&self_lwp->tty->lock, RT_WAITING_FOREVER);
  1192. ret = tty_push(&self_lwp->tty->head, self_lwp);
  1193. rt_mutex_release(&self_lwp->tty->lock);
  1194. if (ret < 0)
  1195. {
  1196. lwp_tid_put(tid);
  1197. lwp_ref_dec(lwp);
  1198. LOG_E("malloc fail!\n");
  1199. return -ENOMEM;
  1200. }
  1201. lwp->tty = self_lwp->tty;
  1202. lwp->tty->pgrp = lwp->__pgrp;
  1203. lwp->tty->session = lwp->session;
  1204. lwp->tty->foreground = lwp;
  1205. }
  1206. else
  1207. {
  1208. lwp->tty = RT_NULL;
  1209. }
  1210. }
  1211. }
  1212. else
  1213. {
  1214. lwp->background = RT_TRUE;
  1215. }
  1216. thread->lwp = lwp;
  1217. #ifndef ARCH_MM_MMU
  1218. struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
  1219. thread->user_stack = app_head->stack_offset ?
  1220. (void *)(app_head->stack_offset -
  1221. app_head->data_offset +
  1222. (uint32_t)lwp->data_entry) : RT_NULL;
  1223. thread->user_stack_size = app_head->stack_size;
  1224. /* init data area */
  1225. rt_memset(lwp->data_entry, 0, lwp->data_size);
  1226. /* init user stack */
  1227. rt_memset(thread->user_stack, '#', thread->user_stack_size);
  1228. #endif /* not defined ARCH_MM_MMU */
  1229. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  1230. if (debug && rt_dbg_ops)
  1231. {
  1232. lwp->debug = debug;
  1233. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
  1234. }
  1235. rt_thread_startup(thread);
  1236. return lwp_to_pid(lwp);
  1237. }
  1238. }
  1239. lwp_tid_put(tid);
  1240. lwp_ref_dec(lwp);
  1241. return -RT_ERROR;
  1242. }
/* With musl libc the environment table comes from the C library; otherwise
 * provide a (NULL) definition here so exec() below can hand it on. */
#ifdef RT_USING_MUSLLIBC
extern char **__environ;
#else
char **__environ = 0;
#endif
/* Convenience wrapper around lwp_execve() that passes the caller's
 * environment along. Returns the new pid or a negative error code. */
pid_t exec(char *filename, int debug, int argc, char **argv)
{
    /* expose the host OS name to the new process' environment */
    setenv("OS", "RT-Thread", 1);
    return lwp_execve(filename, debug, argc, argv, __environ);
}
  1253. #ifdef ARCH_MM_MMU
  1254. void lwp_user_setting_save(rt_thread_t thread)
  1255. {
  1256. if (thread)
  1257. {
  1258. thread->thread_idr = arch_get_tidr();
  1259. }
  1260. }
  1261. void lwp_user_setting_restore(rt_thread_t thread)
  1262. {
  1263. if (!thread)
  1264. {
  1265. return;
  1266. }
  1267. #if !defined(ARCH_RISCV64)
  1268. /* tidr will be set in RESTORE_ALL in risc-v */
  1269. arch_set_tidr(thread->thread_idr);
  1270. #endif
  1271. if (rt_dbg_ops)
  1272. {
  1273. struct rt_lwp *l = (struct rt_lwp *)thread->lwp;
  1274. if (l != 0)
  1275. {
  1276. rt_hw_set_process_id((size_t)l->pid);
  1277. }
  1278. else
  1279. {
  1280. rt_hw_set_process_id(0);
  1281. }
  1282. if (l && l->debug)
  1283. {
  1284. uint32_t step_type = 0;
  1285. step_type = dbg_step_type();
  1286. if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
  1287. {
  1288. dbg_activate_step();
  1289. }
  1290. else
  1291. {
  1292. dbg_deactivate_step();
  1293. }
  1294. }
  1295. }
  1296. }
  1297. #endif /* ARCH_MM_MMU */
  1298. void lwp_uthread_ctx_save(void *ctx)
  1299. {
  1300. rt_thread_t thread;
  1301. thread = rt_thread_self();
  1302. thread->user_ctx.ctx = ctx;
  1303. }
  1304. void lwp_uthread_ctx_restore(void)
  1305. {
  1306. rt_thread_t thread;
  1307. thread = rt_thread_self();
  1308. thread->user_ctx.ctx = RT_NULL;
  1309. }
  1310. rt_err_t lwp_backtrace_frame(rt_thread_t uthread, struct rt_hw_backtrace_frame *frame)
  1311. {
  1312. rt_err_t rc = -RT_ERROR;
  1313. long nesting = 0;
  1314. char **argv;
  1315. rt_lwp_t lwp;
  1316. if (uthread->lwp)
  1317. {
  1318. lwp = uthread->lwp;
  1319. argv = lwp_get_command_line_args(lwp);
  1320. if (argv)
  1321. {
  1322. LOG_RAW("please use: addr2line -e %s -a -f", argv[0]);
  1323. lwp_free_command_line_args(argv);
  1324. }
  1325. else
  1326. {
  1327. LOG_RAW("please use: addr2line -e %s -a -f", lwp->cmd);
  1328. }
  1329. while (nesting < RT_BACKTRACE_LEVEL_MAX_NR)
  1330. {
  1331. LOG_RAW(" 0x%lx", frame->pc);
  1332. if (rt_hw_backtrace_frame_unwind(uthread, frame))
  1333. {
  1334. break;
  1335. }
  1336. nesting++;
  1337. }
  1338. LOG_RAW("\n");
  1339. rc = RT_EOK;
  1340. }
  1341. return rc;
  1342. }
/* Per-tick time accounting for the currently running thread; called from
 * the system tick handler. */
void rt_update_process_times(void)
{
    struct rt_thread *thread;
#ifdef RT_USING_SMP
    struct rt_cpu* pcpu;
    pcpu = rt_cpu_self();
#endif
    thread = rt_thread_self();
    /* NOTE(review): the branch polarity looks inverted relative to the macro
     * name — `!IS_USER_MODE(thread)` charges *user* time while the else
     * branch charges system time. Verify against the actual definition of
     * IS_USER_MODE() before changing; the idle-thread check below is only
     * reachable via the else branch. */
    if (!IS_USER_MODE(thread))
    {
        thread->user_time += 1;
#ifdef RT_USING_SMP
        pcpu->cpu_stat.user += 1;
#endif
    }
    else
    {
        thread->system_time += 1;
#ifdef RT_USING_SMP
        if (thread == pcpu->idle_thread)
        {
            pcpu->cpu_stat.idle += 1;
        }
        else
        {
            pcpu->cpu_stat.system += 1;
        }
#endif
    }
}