lwp.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
  9. * 2018-11-02 heyuanjie fix complie error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. * 2023-02-20 wangxiaoyao inv icache before new app startup
  13. * 2023-02-20 wangxiaoyao fix bug on foreground app switch
  14. * 2023-10-16 Shell Support a new backtrace framework
  15. */
  16. #define DBG_TAG "lwp"
  17. #define DBG_LVL DBG_WARNING
  18. #include <rtdbg.h>
  19. #include <rthw.h>
  20. #include <rtthread.h>
  21. #include <dfs_file.h>
  22. #include <unistd.h>
  23. #include <stdio.h> /* rename() */
  24. #include <fcntl.h>
  25. #include <sys/stat.h>
  26. #include <sys/statfs.h> /* statfs() */
  27. #include <lwp_elf.h>
  28. #ifndef RT_USING_DFS
  29. #error "lwp need file system(RT_USING_DFS)"
  30. #endif
  31. #include "lwp_internal.h"
  32. #include "lwp_arch.h"
  33. #include "lwp_arch_comm.h"
  34. #include "lwp_signal.h"
  35. #include "lwp_dbg.h"
  36. #include "console.h"
  37. #ifdef ARCH_MM_MMU
  38. #include <lwp_user_mm.h>
  39. #endif /* end of ARCH_MM_MMU */
  40. #ifndef O_DIRECTORY
  41. #define O_DIRECTORY 0x200000
  42. #endif
  43. #ifndef O_BINARY
  44. #define O_BINARY 0x10000
  45. #endif
  46. static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
  47. #ifdef DFS_USING_WORKDIR
  48. extern char working_directory[];
  49. #endif
  50. static struct termios stdin_termios, old_stdin_termios;
  51. int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
  52. struct termios *get_old_termios(void)
  53. {
  54. return &old_stdin_termios;
  55. }
  56. int lwp_component_init(void)
  57. {
  58. int rc;
  59. if ((rc = lwp_tid_init()) != RT_EOK)
  60. {
  61. LOG_E("%s: lwp_component_init() failed", __func__);
  62. }
  63. else if ((rc = lwp_pid_init()) != RT_EOK)
  64. {
  65. LOG_E("%s: lwp_pid_init() failed", __func__);
  66. }
  67. return rc;
  68. }
  69. INIT_COMPONENT_EXPORT(lwp_component_init);
  70. void lwp_setcwd(char *buf)
  71. {
  72. struct rt_lwp *lwp = RT_NULL;
  73. if(strlen(buf) >= DFS_PATH_MAX)
  74. {
  75. rt_kprintf("buf too long!\n");
  76. return ;
  77. }
  78. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  79. if (lwp)
  80. {
  81. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
  82. }
  83. else
  84. {
  85. rt_strncpy(working_directory, buf, DFS_PATH_MAX);
  86. }
  87. return ;
  88. }
  89. char *lwp_getcwd(void)
  90. {
  91. char *dir_buf = RT_NULL;
  92. struct rt_lwp *lwp = RT_NULL;
  93. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  94. if (lwp)
  95. {
  96. if(lwp->working_directory[0] != '/')
  97. {
  98. dir_buf = &working_directory[0];
  99. }
  100. else
  101. {
  102. dir_buf = &lwp->working_directory[0];
  103. }
  104. }
  105. else
  106. dir_buf = &working_directory[0];
  107. return dir_buf;
  108. }
  109. /**
  110. * RT-Thread light-weight process
  111. */
  112. void lwp_set_kernel_sp(uint32_t *sp)
  113. {
  114. rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
  115. }
/**
 * Fetch the kernel-mode stack pointer for the current context.
 *
 * With an MMU the thread's saved sp is used directly. Without an MMU, a
 * pending context switch from interrupt means rt_thread_self() is no
 * longer the interrupted thread, so the TCB is recovered from
 * rt_interrupt_from_thread instead.
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef ARCH_MM_MMU
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;

    if (rt_thread_switch_interrupt_flag)
    {
        /* rt_interrupt_from_thread holds the address of the interrupted
         * thread's saved sp field; recover the enclosing TCB from it */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
  135. #ifdef ARCH_MM_MMU
/**
 * Copy argc/argv/envp into the new process's address space (MMU build) and
 * build the ELF auxiliary vector behind them.
 *
 * Layout written at the page mapped at USER_STACK_VEND:
 *   argc | argv[0..argc-1] | NULL | envp[...] | NULL | aux items | strings
 *
 * All pointer values stored in the vector are USER addresses ('args'/'str'),
 * while the actual writes go through the kernel alias ('args_k'/'str_k')
 * obtained via lwp_v2p() - PV_OFFSET.
 *
 * @return pointer (kernel alias) to the aux vector, or RT_NULL when the
 *         whole block would exceed one page or the mapping fails.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;          /* user-space cursor for string storage */
    char *str_k;        /* kernel-alias cursor mirroring 'str' */
    char **new_argve;
    int i;
    int len;
    size_t *args_k;     /* kernel alias of 'args' */
    struct process_aux *aux;
    size_t prot = PROT_READ | PROT_WRITE;
    size_t flags = MAP_FIXED | MAP_PRIVATE;
    size_t zero = 0;

    /* account for every argv string (incl. NUL) and its pointer slot */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);

    /* same for envp; afterwards 'i' holds the number of env entries */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }
    /* for aux */
    size += sizeof(struct process_aux);
    if (size > ARCH_PAGE_SIZE)
    {
        /* everything must fit in the single page mapped below */
        return RT_NULL;
    }

    /* map the args page at the top end of the user stack; the probe write
     * of 'zero' verifies the mapping is actually usable */
    args = lwp_mmap2(lwp, (void *)(USER_STACK_VEND), size, prot, flags, -1, 0);
    if (args == RT_NULL || lwp_data_put(lwp, args, &zero, sizeof(zero)) != sizeof(zero))
    {
        return RT_NULL;
    }

    /* translate user address -> physical -> kernel-accessible alias */
    args_k = (size_t *)lwp_v2p(lwp, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);

    /* argc, argv[], 0, envp[], 0 , aux[] */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    new_argve = (char **)&args_k[1];
    args_k[0] = argc;

    /* copy argv strings; stored pointers are user addresses */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;     /* argv terminator */
    i++;
    new_argve[i] = 0;     /* envp starts here (pre-cleared) */

    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0; /* envp terminator */
    }
    i++;

    /* aux vector follows immediately after the envp terminator */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;     /* aux terminator */

    /* make the writes visible before the new process starts executing */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);

    lwp->args = args;

    return aux;
}
  219. #else
/**
 * Non-MMU variant of lwp_argscopy(): the args block lives in a plain
 * kernel heap allocation instead of a mapped user page, so no address
 * translation is needed — pointers are usable directly.
 *
 * @return pointer to the aux vector (or the slot where it would live when
 *         ARCH_MM_MMU is unset), or RT_NULL on allocation failure.
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;

    /* bytes for argv strings plus their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);

    /* same for envp; afterwards 'i' holds the env entry count */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }
#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);

    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */

    new_argve = (char **)&args[1];
    args[0] = argc;

    /* copy argv strings into the tail of the block */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;     /* argv terminator */
    i++;
    new_argve[i] = 0;

    if (envp)
    {
        int j;

        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0; /* envp terminator */
    }
#ifdef ARCH_MM_MMU
    /* aux vector follows the envp terminator */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    lwp->args = args;

    return aux;
#else
    lwp->args = args;
    lwp->args_length = size; /* remembered so the block can be freed later */

    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
  306. #endif
  307. #ifdef ARCH_MM_MMU
/*
 * Error-checking helpers used only inside load_elf(). Both bail out by
 * setting 'result' to -RT_ERROR and jumping to the function-local '_exit'
 * label, so they must only be expanded where those names are in scope.
 */

/* Fail when file offset 'voff' lies beyond the file length 'vlen'. */
#define check_off(voff, vlen) \
do \
{ \
    if (voff > vlen) \
    { \
        result = -RT_ERROR; \
        goto _exit; \
    } \
} while (0)

/* Fail when an actual read count 'vrlen' falls short of 'vrlen_want'. */
#define check_read(vrlen, vrlen_want) \
do \
{ \
    if (vrlen < vrlen_want) \
    { \
        result = -RT_ERROR; \
        goto _exit; \
    } \
} while (0)
  326. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  327. {
  328. size_t read_block = 0;
  329. while (nmemb)
  330. {
  331. size_t count;
  332. count = read(fd, ptr, size * nmemb) / size;
  333. if (count < nmemb)
  334. {
  335. LOG_E("ERROR: file size error!");
  336. break;
  337. }
  338. ptr = (void *)((uint8_t *)ptr + (count * size));
  339. nmemb -= count;
  340. read_block += count;
  341. }
  342. return read_block;
  343. }
/*
 * Symbol-table entry used when reading ".dynsym" out of the ELF image.
 * Field widths come from the Elf_* typedefs selected by lwp_elf.h.
 * NOTE(review): this member order matches the ELF32 on-disk layout; the
 * ELF64 layout orders st_info/st_other/st_shndx before st_value — confirm
 * lwp_elf.h's typedefs keep this struct consistent on 64-bit targets.
 */
typedef struct
{
    Elf_Word st_name;       /* index into the symbol string table */
    Elf_Addr st_value;      /* symbol value (address) */
    Elf_Word st_size;       /* size of the associated object */
    unsigned char st_info;  /* type and binding attributes */
    unsigned char st_other; /* visibility */
    Elf_Half st_shndx;      /* index of the defining section */
} Elf_sym;
  353. #ifdef ARCH_MM_MMU
  354. struct map_range
  355. {
  356. void *start;
  357. size_t size;
  358. };
  359. static void expand_map_range(struct map_range *m, void *start, size_t size)
  360. {
  361. if (!m->start)
  362. {
  363. m->start = start;
  364. m->size = size;
  365. }
  366. else
  367. {
  368. void *end = (void *)((char*)start + size);
  369. void *mend = (void *)((char*)m->start + m->size);
  370. if (m->start > start)
  371. {
  372. m->start = start;
  373. }
  374. if (mend < end)
  375. {
  376. mend = end;
  377. }
  378. m->size = (char *)mend - (char *)m->start;
  379. }
  380. }
  381. static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
  382. {
  383. void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
  384. void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  385. void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
  386. void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  387. if (m1->size)
  388. {
  389. if (m1_start < (void *)USER_LOAD_VADDR)
  390. {
  391. return -1;
  392. }
  393. if (m1_start > (void *)USER_STACK_VSTART)
  394. {
  395. return -1;
  396. }
  397. if (m1_end < (void *)USER_LOAD_VADDR)
  398. {
  399. return -1;
  400. }
  401. if (m1_end > (void *)USER_STACK_VSTART)
  402. {
  403. return -1;
  404. }
  405. }
  406. if (m2->size)
  407. {
  408. if (m2_start < (void *)USER_LOAD_VADDR)
  409. {
  410. return -1;
  411. }
  412. if (m2_start > (void *)USER_STACK_VSTART)
  413. {
  414. return -1;
  415. }
  416. if (m2_end < (void *)USER_LOAD_VADDR)
  417. {
  418. return -1;
  419. }
  420. if (m2_end > (void *)USER_STACK_VSTART)
  421. {
  422. return -1;
  423. }
  424. }
  425. if ((m1->size != 0) && (m2->size != 0))
  426. {
  427. if (m1_start < m2_start)
  428. {
  429. if (m1_end > m2_start)
  430. {
  431. return -1;
  432. }
  433. }
  434. else /* m2_start <= m1_start */
  435. {
  436. if (m2_end > m1_start)
  437. {
  438. return -1;
  439. }
  440. }
  441. }
  442. return 0;
  443. }
  444. #endif
/**
 * Load a static or PIE ELF image from 'fd' into the process 'lwp'.
 *
 * Steps: validate magic/class/version/type; bail out with 1 when a
 * PT_DYNAMIC segment is found (caller then loads ld.so instead); fill the
 * aux vector (AT_PAGESZ/AT_RANDOM/AT_PHDR/AT_PHNUM/AT_PHENT); map or
 * allocate the text/data regions; copy PT_LOAD segments and zero their BSS
 * tails; finally apply relocations for ET_DYN images.
 *
 * @param fd        open ELF file descriptor
 * @param len       total file length (used by check_off bounds checks)
 * @param lwp       target process control block
 * @param load_addr fixed load address, or NULL to let the loader choose
 * @param aux       aux vector to populate (items 1..5 are written here)
 * @return RT_EOK on success, 1 when the image needs the dynamic loader,
 *         negative rt error code otherwise.
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef ARCH_MM_MMU
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;
#endif

    /* --- header validation ------------------------------------------- */
    if (len < sizeof eheader)
    {
        LOG_E("len < sizeof eheader!");
        return -RT_ERROR;
    }
    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        LOG_E("elf_magic not same, magic:0x%x!", magic);
        return -RT_ERROR;
    }
    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);
#ifndef ARCH_CPU_64BIT
    /* e_ident[4] is EI_CLASS: 1 = ELFCLASS32, 2 = ELFCLASS64 */
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#endif
    /* e_ident[6] is EI_VERSION; must be EV_CURRENT (1) */
    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        LOG_E("elf Version not 1,ver:%d!", eheader.e_ident[6]);
        return -RT_ERROR;
    }
    if ((eheader.e_type != ET_DYN)
#ifdef ARCH_MM_MMU
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
        return -RT_ERROR;
    }
#ifdef ARCH_MM_MMU
    {
        /* a PT_DYNAMIC segment means the image needs ld.so: hand back to
         * the caller, which will load the dynamic loader instead */
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);
            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif
    if (eheader.e_entry != 0)
    {
        /* only the fixed application / ld.so entry points are accepted */
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalidate */
            LOG_E("elf entry is invalidate, entry:0x%x!", eheader.e_entry);
            return -RT_ERROR;
        }
    }

    { /* load aux: copy the program-header table into user memory and fill
         the aux items the C runtime expects */
        uint8_t *process_header;
        size_t process_header_size;

        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef ARCH_MM_MMU
        /* phdr table + the 16-byte AT_RANDOM blob must fit in one page */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            LOG_E("process_header_size too big, size:0x%x!", process_header_size);
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            LOG_E("lwp map user failed!");
            return -RT_ERROR;
        }
        /* write through the kernel alias of the freshly mapped page */
        pa = lwp_v2p(lwp, va);
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif

        aux->item[1].key = AT_PAGESZ;
#ifdef ARCH_MM_MMU
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif
        /* AT_RANDOM: 16 bytes expected by libc; seeded from the tick
         * counter (NOTE(review): not cryptographically random) */
        aux->item[2].key = AT_RANDOM;
        {
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef ARCH_MM_MMU
            uint8_t *krandom;

            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)lwp_v2p(lwp, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }
        aux->item[3].key = AT_PHDR;
#ifdef ARCH_MM_MMU
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }

    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef ARCH_MM_MMU
    else
    {
        /* map user: walk the section headers, accumulating the text
         * (read-only PROGBITS) and data (everything else allocatable)
         * extents, then map both regions into the process */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }
        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }
        if (user_area[0].start == NULL)
        {
            /* DYN: image is position-independent, rebase to the fixed
             * user load address */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }
        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }
        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        /* no MMU: compute the total extent of all allocatable sections and
         * heap-allocate one contiguous block for the whole image */
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;

        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }
        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);

    /* --- copy PT_LOAD segments and zero their BSS tails -------------- */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);
        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
                return -RT_ERROR;
            }
            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef ARCH_MM_MMU
            {
                /* copy page by page through the kernel alias of each user
                 * page, flushing dcache so the data is visible to ifetch */
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;

                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                while (size)
                {
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);

            if (pheader.p_filesz < pheader.p_memsz)
            {
                /* zero the p_memsz - p_filesz tail (BSS) of the segment */
#ifdef ARCH_MM_MMU
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;

                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }

    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);
        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }
        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);

        /* locate .got / .rel.dyn / .dynsym by name */
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }
        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef ARCH_MM_MMU
        arch_elf_reloc(lwp->aspace, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);

        /* keep caches coherent before the relocated text is executed */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);

_exit:
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  884. #endif /* ARCH_MM_MMU */
  885. rt_weak int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  886. {
  887. uint8_t *ptr;
  888. int ret = -1;
  889. int len;
  890. int fd = -1;
  891. /* check file name */
  892. RT_ASSERT(filename != RT_NULL);
  893. /* check lwp control block */
  894. RT_ASSERT(lwp != RT_NULL);
  895. /* copy file name to process name */
  896. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  897. if (load_addr != RT_NULL)
  898. {
  899. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  900. ptr = load_addr;
  901. }
  902. else
  903. {
  904. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  905. ptr = RT_NULL;
  906. }
  907. fd = open(filename, O_BINARY | O_RDONLY, 0);
  908. if (fd < 0)
  909. {
  910. LOG_E("ERROR: Can't open elf file %s!", filename);
  911. goto out;
  912. }
  913. len = lseek(fd, 0, SEEK_END);
  914. if (len < 0)
  915. {
  916. LOG_E("ERROR: File %s size error!", filename);
  917. goto out;
  918. }
  919. lseek(fd, 0, SEEK_SET);
  920. ret = load_elf(fd, len, lwp, ptr, aux);
  921. if ((ret != RT_EOK) && (ret != 1))
  922. {
  923. LOG_E("lwp load ret = %d", ret);
  924. }
  925. out:
  926. if (fd > 0)
  927. {
  928. close(fd);
  929. }
  930. return ret;
  931. }
  932. /* lwp-thread clean up routine */
  933. void lwp_cleanup(struct rt_thread *tid)
  934. {
  935. struct rt_lwp *lwp;
  936. if (tid == NULL)
  937. {
  938. LOG_I("%s: invalid parameter tid == NULL", __func__);
  939. return;
  940. }
  941. else
  942. LOG_D("cleanup thread: %s, stack_addr: 0x%x", tid->parent.name, tid->stack_addr);
  943. /**
  944. * Brief: lwp thread cleanup
  945. *
  946. * Note: Critical Section
  947. * - thread control block (RW. It's ensured that no one else can access tcb
  948. * other than itself)
  949. */
  950. lwp = (struct rt_lwp *)tid->lwp;
  951. lwp_thread_signal_detach(&tid->signal);
  952. /* tty will be release in lwp_ref_dec() if ref is cleared */
  953. lwp_ref_dec(lwp);
  954. return;
  955. }
  956. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  957. {
  958. struct dfs_file *d;
  959. struct dfs_fdtable *lwp_fdt;
  960. lwp_fdt = &lwp->fdt;
  961. /* init 4 fds */
  962. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  963. if (lwp_fdt->fds)
  964. {
  965. lwp_fdt->maxfd = 4;
  966. d = fd_get(0);
  967. fd_associate(lwp_fdt, 0, d);
  968. d = fd_get(1);
  969. fd_associate(lwp_fdt, 1, d);
  970. d = fd_get(2);
  971. fd_associate(lwp_fdt, 2, d);
  972. }
  973. return;
  974. }
/* First code executed by a freshly created lwp thread: installs the cleanup
 * hook, optionally plants a breakpoint at the entry instruction for the
 * debugger, then drops to user mode and never returns. */
static void _lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

    if (lwp->debug)
    {
        /* save the original first instruction, then overwrite it with the
         * debug-break instruction so the debugger gains control at entry */
        lwp->bak_first_inst = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        /* flush dcache so the patched word reaches memory, then drop the
         * icache so the core fetches the patched instruction */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }

    /**
     * without ASID support, it will be a special case when trying to run application
     * and exit multiple times and a same page frame allocated to it bound to
     * different text segment. Then we are in a situation where icache contains
     * out-of-dated data and must be handle by the running core itself.
     * with ASID support, this should be a rare case that ASID & page frame both
     * identical to previous running application.
     *
     * For a new application loaded into memory, icache are seen as empty. And there
     * should be nothing in the icache entry to match. So this icache invalidation
     * operation should have barely influence.
     */
    rt_hw_icache_invalidate_all();

#ifdef ARCH_MM_MMU
    /* MMU build: user stack grows down from USER_STACK_VEND; kernel stack
     * top is passed so traps can switch back */
    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, (char *)tid->stack_addr + tid->stack_size);
#else
    /* no-MMU build: stack area lives at the end of the data segment */
    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
  1009. struct rt_lwp *lwp_self(void)
  1010. {
  1011. rt_thread_t tid;
  1012. tid = rt_thread_self();
  1013. if (tid)
  1014. {
  1015. return (struct rt_lwp *)tid->lwp;
  1016. }
  1017. return RT_NULL;
  1018. }
  1019. rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child)
  1020. {
  1021. /* lwp add to children link */
  1022. LWP_LOCK(parent);
  1023. child->sibling = parent->first_child;
  1024. parent->first_child = child;
  1025. child->parent = parent;
  1026. LWP_UNLOCK(parent);
  1027. LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);
  1028. return 0;
  1029. }
  1030. rt_err_t lwp_children_unregister(struct rt_lwp *parent, struct rt_lwp *child)
  1031. {
  1032. struct rt_lwp **lwp_node;
  1033. LWP_LOCK(parent);
  1034. /* detach from children link */
  1035. lwp_node = &parent->first_child;
  1036. while (*lwp_node != child)
  1037. {
  1038. RT_ASSERT(*lwp_node != RT_NULL);
  1039. lwp_node = &(*lwp_node)->sibling;
  1040. }
  1041. (*lwp_node) = child->sibling;
  1042. child->parent = RT_NULL;
  1043. LWP_UNLOCK(parent);
  1044. LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);
  1045. return 0;
  1046. }
  1047. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  1048. {
  1049. int result;
  1050. struct rt_lwp *lwp;
  1051. char *thread_name;
  1052. char *argv_last = argv[argc - 1];
  1053. int bg = 0;
  1054. struct process_aux *aux;
  1055. int tid = 0;
  1056. int ret;
  1057. if (filename == RT_NULL)
  1058. {
  1059. return -RT_ERROR;
  1060. }
  1061. if (access(filename, X_OK) != 0)
  1062. {
  1063. return -EACCES;
  1064. }
  1065. lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID);
  1066. if (lwp == RT_NULL)
  1067. {
  1068. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  1069. return -RT_ENOMEM;
  1070. }
  1071. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  1072. if ((tid = lwp_tid_get()) == 0)
  1073. {
  1074. lwp_ref_dec(lwp);
  1075. return -ENOMEM;
  1076. }
  1077. #ifdef ARCH_MM_MMU
  1078. if (lwp_user_space_init(lwp, 0) != 0)
  1079. {
  1080. lwp_tid_put(tid);
  1081. lwp_ref_dec(lwp);
  1082. return -ENOMEM;
  1083. }
  1084. #endif
  1085. if (argv_last[0] == '&' && argv_last[1] == '\0')
  1086. {
  1087. argc--;
  1088. bg = 1;
  1089. }
  1090. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  1091. {
  1092. lwp_tid_put(tid);
  1093. lwp_ref_dec(lwp);
  1094. return -ENOMEM;
  1095. }
  1096. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  1097. #ifdef ARCH_MM_MMU
  1098. if (result == 1)
  1099. {
  1100. /* dynmaic */
  1101. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  1102. result = load_ldso(lwp, filename, argv, envp);
  1103. }
  1104. #endif /* ARCH_MM_MMU */
  1105. if (result == RT_EOK)
  1106. {
  1107. rt_thread_t thread = RT_NULL;
  1108. rt_uint32_t priority = 25, tick = 200;
  1109. lwp_copy_stdio_fdt(lwp);
  1110. /* obtain the base name */
  1111. thread_name = strrchr(filename, '/');
  1112. thread_name = thread_name ? thread_name + 1 : filename;
  1113. #ifndef ARCH_MM_MMU
  1114. struct lwp_app_head *app_head = lwp->text_entry;
  1115. if (app_head->priority)
  1116. {
  1117. priority = app_head->priority;
  1118. }
  1119. if (app_head->tick)
  1120. {
  1121. tick = app_head->tick;
  1122. }
  1123. #endif /* not defined ARCH_MM_MMU */
  1124. thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
  1125. LWP_TASK_STACK_SIZE, priority, tick);
  1126. if (thread != RT_NULL)
  1127. {
  1128. struct rt_lwp *self_lwp;
  1129. thread->tid = tid;
  1130. lwp_tid_set_thread(tid, thread);
  1131. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
  1132. (rt_size_t)thread->stack_addr + thread->stack_size);
  1133. self_lwp = lwp_self();
  1134. if (self_lwp)
  1135. {
  1136. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1137. lwp->__pgrp = tid;
  1138. lwp->session = self_lwp->session;
  1139. /* lwp add to children link */
  1140. lwp_children_register(self_lwp, lwp);
  1141. }
  1142. else
  1143. {
  1144. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1145. lwp->__pgrp = tid;
  1146. }
  1147. if (!bg)
  1148. {
  1149. if (lwp->session == -1)
  1150. {
  1151. struct tty_struct *tty = RT_NULL;
  1152. struct rt_lwp *old_lwp;
  1153. tty = (struct tty_struct *)console_tty_get();
  1154. old_lwp = tty->foreground;
  1155. if (old_lwp)
  1156. {
  1157. rt_mutex_take(&tty->lock, RT_WAITING_FOREVER);
  1158. ret = tty_push(&tty->head, old_lwp);
  1159. rt_mutex_release(&tty->lock);
  1160. if (ret < 0)
  1161. {
  1162. lwp_tid_put(tid);
  1163. lwp_ref_dec(lwp);
  1164. LOG_E("malloc fail!\n");
  1165. return -ENOMEM;
  1166. }
  1167. }
  1168. lwp->tty = tty;
  1169. lwp->tty->pgrp = lwp->__pgrp;
  1170. lwp->tty->session = lwp->session;
  1171. lwp->tty->foreground = lwp;
  1172. tcgetattr(1, &stdin_termios);
  1173. old_stdin_termios = stdin_termios;
  1174. stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
  1175. tcsetattr(1, 0, &stdin_termios);
  1176. }
  1177. else
  1178. {
  1179. if (self_lwp != RT_NULL)
  1180. {
  1181. rt_mutex_take(&self_lwp->tty->lock, RT_WAITING_FOREVER);
  1182. ret = tty_push(&self_lwp->tty->head, self_lwp);
  1183. rt_mutex_release(&self_lwp->tty->lock);
  1184. if (ret < 0)
  1185. {
  1186. lwp_tid_put(tid);
  1187. lwp_ref_dec(lwp);
  1188. LOG_E("malloc fail!\n");
  1189. return -ENOMEM;
  1190. }
  1191. lwp->tty = self_lwp->tty;
  1192. lwp->tty->pgrp = lwp->__pgrp;
  1193. lwp->tty->session = lwp->session;
  1194. lwp->tty->foreground = lwp;
  1195. }
  1196. else
  1197. {
  1198. lwp->tty = RT_NULL;
  1199. }
  1200. }
  1201. }
  1202. else
  1203. {
  1204. lwp->background = RT_TRUE;
  1205. }
  1206. thread->lwp = lwp;
  1207. #ifndef ARCH_MM_MMU
  1208. struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
  1209. thread->user_stack = app_head->stack_offset ?
  1210. (void *)(app_head->stack_offset -
  1211. app_head->data_offset +
  1212. (uint32_t)lwp->data_entry) : RT_NULL;
  1213. thread->user_stack_size = app_head->stack_size;
  1214. /* init data area */
  1215. rt_memset(lwp->data_entry, 0, lwp->data_size);
  1216. /* init user stack */
  1217. rt_memset(thread->user_stack, '#', thread->user_stack_size);
  1218. #endif /* not defined ARCH_MM_MMU */
  1219. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  1220. if (debug && rt_dbg_ops)
  1221. {
  1222. lwp->debug = debug;
  1223. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
  1224. }
  1225. rt_thread_startup(thread);
  1226. return lwp_to_pid(lwp);
  1227. }
  1228. }
  1229. lwp_tid_put(tid);
  1230. lwp_ref_dec(lwp);
  1231. return -RT_ERROR;
  1232. }
#ifdef RT_USING_MUSLLIBC
/* musl libc owns the environment table; just reference it */
extern char **__environ;
#else
/* no libc-provided environment; define an empty placeholder */
char **__environ = 0;
#endif
/**
 * @brief Convenience wrapper around lwp_execve() using the kernel's
 *        current environment.
 *
 * Sets the "OS" environment variable before launching so the new process
 * can identify the host system.
 */
pid_t exec(char *filename, int debug, int argc, char **argv)
{
    setenv("OS", "RT-Thread", 1);
    return lwp_execve(filename, debug, argc, argv, __environ);
}
  1243. #ifdef ARCH_MM_MMU
  1244. void lwp_user_setting_save(rt_thread_t thread)
  1245. {
  1246. if (thread)
  1247. {
  1248. thread->thread_idr = arch_get_tidr();
  1249. }
  1250. }
  1251. void lwp_user_setting_restore(rt_thread_t thread)
  1252. {
  1253. if (!thread)
  1254. {
  1255. return;
  1256. }
  1257. #if !defined(ARCH_RISCV64)
  1258. /* tidr will be set in RESTORE_ALL in risc-v */
  1259. arch_set_tidr(thread->thread_idr);
  1260. #endif
  1261. if (rt_dbg_ops)
  1262. {
  1263. struct rt_lwp *l = (struct rt_lwp *)thread->lwp;
  1264. if (l != 0)
  1265. {
  1266. rt_hw_set_process_id((size_t)l->pid);
  1267. }
  1268. else
  1269. {
  1270. rt_hw_set_process_id(0);
  1271. }
  1272. if (l && l->debug)
  1273. {
  1274. uint32_t step_type = 0;
  1275. step_type = dbg_step_type();
  1276. if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
  1277. {
  1278. dbg_activate_step();
  1279. }
  1280. else
  1281. {
  1282. dbg_deactivate_step();
  1283. }
  1284. }
  1285. }
  1286. }
  1287. #endif /* ARCH_MM_MMU */
  1288. void lwp_uthread_ctx_save(void *ctx)
  1289. {
  1290. rt_thread_t thread;
  1291. thread = rt_thread_self();
  1292. thread->user_ctx.ctx = ctx;
  1293. }
  1294. void lwp_uthread_ctx_restore(void)
  1295. {
  1296. rt_thread_t thread;
  1297. thread = rt_thread_self();
  1298. thread->user_ctx.ctx = RT_NULL;
  1299. }
  1300. rt_err_t lwp_backtrace_frame(rt_thread_t uthread, struct rt_hw_backtrace_frame *frame)
  1301. {
  1302. rt_err_t rc = -RT_ERROR;
  1303. long nesting = 0;
  1304. char **argv;
  1305. rt_lwp_t lwp;
  1306. if (uthread->lwp)
  1307. {
  1308. lwp = uthread->lwp;
  1309. argv = lwp_get_command_line_args(lwp);
  1310. if (argv)
  1311. {
  1312. LOG_RAW("please use: addr2line -e %s -a -f", argv[0]);
  1313. lwp_free_command_line_args(argv);
  1314. }
  1315. else
  1316. {
  1317. LOG_RAW("please use: addr2line -e %s -a -f", lwp->cmd);
  1318. }
  1319. while (nesting < RT_BACKTRACE_LEVEL_MAX_NR)
  1320. {
  1321. LOG_RAW(" 0x%lx", frame->pc);
  1322. if (rt_hw_backtrace_frame_unwind(uthread, frame))
  1323. {
  1324. break;
  1325. }
  1326. nesting++;
  1327. }
  1328. LOG_RAW("\n");
  1329. rc = RT_EOK;
  1330. }
  1331. return rc;
  1332. }
/* Per-tick CPU time accounting for the currently running thread; on SMP
 * builds also updates the per-cpu user/system/idle statistics. */
void rt_update_process_times(void)
{
    struct rt_thread *thread;
#ifdef RT_USING_SMP
    struct rt_cpu* pcpu;

    pcpu = rt_cpu_self();
#endif

    thread = rt_thread_self();
    /* NOTE(review): this charges user_time when IS_USER_MODE() is FALSE,
     * which looks inverted relative to the macro's name. The macro's
     * definition is not visible here — confirm its semantics before
     * changing this branch. */
    if (!IS_USER_MODE(thread))
    {
        thread->user_time += 1;
#ifdef RT_USING_SMP
        pcpu->cpu_stat.user += 1;
#endif
    }
    else
    {
        thread->system_time += 1;
#ifdef RT_USING_SMP
        /* ticks spent in the idle thread count as idle, not system */
        if (thread == pcpu->idle_thread)
        {
            pcpu->cpu_stat.idle += 1;
        }
        else
        {
            pcpu->cpu_stat.system += 1;
        }
#endif
    }
}