lwp.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
* 2018-11-02 heyuanjie fix compile error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. * 2023-02-20 wangxiaoyao inv icache before new app startup
  13. * 2023-02-20 wangxiaoyao fix bug on foreground app switch
  14. */
  15. #define DBG_TAG "LWP"
  16. #define DBG_LVL DBG_WARNING
  17. #include <rtdbg.h>
  18. #include <rthw.h>
  19. #include <rtthread.h>
  20. #include <dfs_file.h>
  21. #include <unistd.h>
  22. #include <stdio.h> /* rename() */
  23. #include <fcntl.h>
  24. #include <sys/stat.h>
  25. #include <sys/statfs.h> /* statfs() */
  26. #include <lwp_elf.h>
  27. #ifndef RT_USING_DFS
  28. #error "lwp need file system(RT_USING_DFS)"
  29. #endif
  30. #include "lwp.h"
  31. #include "lwp_arch.h"
  32. #include "lwp_arch_comm.h"
  33. #include "lwp_signal.h"
  34. #include "lwp_dbg.h"
  35. #include "console.h"
  36. #ifdef ARCH_MM_MMU
  37. #include <lwp_user_mm.h>
  38. #endif /* end of ARCH_MM_MMU */
  39. #ifndef O_DIRECTORY
  40. #define O_DIRECTORY 0x200000
  41. #endif
  42. #ifndef O_BINARY
  43. #define O_BINARY 0x10000
  44. #endif
  45. static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
  46. #ifdef DFS_USING_WORKDIR
  47. extern char working_directory[];
  48. #endif
  49. static struct termios stdin_termios, old_stdin_termios;
  50. int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
  51. struct termios *get_old_termios(void)
  52. {
  53. return &old_stdin_termios;
  54. }
  55. void lwp_setcwd(char *buf)
  56. {
  57. struct rt_lwp *lwp = RT_NULL;
  58. if(strlen(buf) >= DFS_PATH_MAX)
  59. {
  60. rt_kprintf("buf too long!\n");
  61. return ;
  62. }
  63. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  64. if (lwp)
  65. {
  66. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
  67. }
  68. else
  69. {
  70. rt_strncpy(working_directory, buf, DFS_PATH_MAX);
  71. }
  72. return ;
  73. }
  74. char *lwp_getcwd(void)
  75. {
  76. char *dir_buf = RT_NULL;
  77. struct rt_lwp *lwp = RT_NULL;
  78. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  79. if (lwp)
  80. {
  81. if(lwp->working_directory[0] != '/')
  82. {
  83. dir_buf = &working_directory[0];
  84. }
  85. else
  86. {
  87. dir_buf = &lwp->working_directory[0];
  88. }
  89. }
  90. else
  91. dir_buf = &working_directory[0];
  92. return dir_buf;
  93. }
  94. /**
  95. * RT-Thread light-weight process
  96. */
  97. void lwp_set_kernel_sp(uint32_t *sp)
  98. {
  99. rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
  100. }
/**
 * Get the kernel-mode stack pointer to use for the current context.
 *
 * With an MMU the thread's saved sp already is the kernel stack, so it is
 * returned directly. Without an MMU, if a context switch is pending
 * (rt_thread_switch_interrupt_flag set) the kernel sp of the *outgoing*
 * thread is returned — recovered from rt_interrupt_from_thread, which holds
 * the address of that thread's sp field — otherwise the current thread's
 * saved kernel_sp is used.
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef ARCH_MM_MMU
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;
    if (rt_thread_switch_interrupt_flag)
    {
        /* rt_interrupt_from_thread stores &thread->sp; rt_container_of
         * walks back to the owning rt_thread to fetch its kernel_sp */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
  120. #ifdef ARCH_MM_MMU
/**
 * Build the initial user-stack page holding argc/argv/envp/aux for a new
 * process (MMU build).
 *
 * Layout written at USER_STACK_VEND: argc, argv[0..argc-1], NULL, envp[...],
 * NULL, aux items, NULL, followed by the string data itself. All writes go
 * through the kernel-visible alias of the page (args_k), while the pointers
 * stored in the table are the user-space addresses (str/args).
 *
 * @param lwp  target process; on success lwp->args points at the user page.
 * @param argc number of argument strings.
 * @param argv argument strings (kernel pointers).
 * @param envp NULL-terminated environment strings, may be RT_NULL.
 * @return pointer (kernel-visible) to the aux vector inside the page, or
 *         RT_NULL if the content does not fit one page or mapping fails.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;
    char *str_k;
    char **new_argve;
    int i;
    int len;
    size_t *args_k;
    struct process_aux *aux;
    size_t prot = PROT_READ | PROT_WRITE;
    size_t flags = MAP_FIXED | MAP_PRIVATE;
    size_t zero = 0;
    /* account for every argument string (incl. NUL) ... */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);
    /* ... and every environment string plus its pointer slot;
     * after this loop `i` is the environment entry count */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }
    /* for aux */
    size += sizeof(struct process_aux);
    /* everything must fit in the single page mapped below */
    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }
    /* map one fixed page at the top of the user stack; the lwp_data_put of
     * `zero` both commits the page and verifies it is writable */
    args = lwp_mmap2(lwp, (void *)(USER_STACK_VEND), size, prot, flags, -1, 0);
    if (args == RT_NULL || lwp_data_put(lwp, args, &zero, sizeof(zero)) != sizeof(zero))
    {
        return RT_NULL;
    }
    /* kernel-visible alias of the same page */
    args_k = (size_t *)lwp_v2p(lwp, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);
    /* argc, argv[], 0, envp[], 0 , aux[] */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    new_argve = (char **)&args_k[1];
    args_k[0] = argc;
    /* copy argv strings; table entries hold user addresses (str), data is
     * written through the kernel alias (str_k) */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0; /* argv terminator */
    i++;
    new_argve[i] = 0; /* envp terminator when envp is empty/absent */
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;
    }
    i++;
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    /* NOTE(review): double (size_t) cast is redundant but harmless */
    aux->item[0].value = (size_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;
    /* push the whole constructed page to memory before user mode sees it */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);
    lwp->args = args;
    return aux;
}
  204. #else
/**
 * Build the argc/argv/envp (and aux, when ARCH_MM_MMU) block for a new
 * process in a kernel heap buffer (non-MMU build path).
 *
 * Layout: argc, argv pointers, NULL, envp pointers, NULL[, aux], followed
 * by the copied string data. On success lwp->args (and, without MMU,
 * lwp->args_length) are filled in.
 *
 * @return pointer to the aux area inside the buffer, or RT_NULL on
 *         allocation failure.
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;
    /* account for argument strings and their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);
    /* count environment entries; `i` holds the count afterwards */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }
#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */
    new_argve = (char **)&args[1];
    args[0] = argc;
    /* copy argv strings directly (single address space, no user alias) */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0; /* argv terminator */
    i++;
    new_argve[i] = 0; /* envp terminator when envp is empty/absent */
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;
    }
    /* NOTE(review): unlike the MMU-page variant above, `i` is NOT advanced
     * past the envp terminator here, so the aux area starts at the slot
     * holding that terminator — confirm intended before relying on it */
#ifdef ARCH_MM_MMU
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;
    lwp->args = args;
    return aux;
#else
    lwp->args = args;
    lwp->args_length = size;
    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
  291. #endif
  292. #ifdef ARCH_MM_MMU
/* Bail out of the enclosing function when file offset `voff` lies beyond
 * the file length `vlen`. Relies on a local `result` variable and an
 * `_exit` cleanup label existing in the caller (used inside load_elf). */
#define check_off(voff, vlen) \
do \
{ \
    if (voff > vlen) \
    { \
        result = -RT_ERROR; \
        goto _exit; \
    } \
} while (0)
/* Bail out of the enclosing function when fewer bytes (`vrlen`) were read
 * than wanted (`vrlen_want`). Same `result`/`_exit` contract as above. */
#define check_read(vrlen, vrlen_want) \
do \
{ \
    if (vrlen < vrlen_want) \
    { \
        result = -RT_ERROR; \
        goto _exit; \
    } \
} while (0)
  311. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  312. {
  313. size_t read_block = 0;
  314. while (nmemb)
  315. {
  316. size_t count;
  317. count = read(fd, ptr, size * nmemb) / size;
  318. if (count < nmemb)
  319. {
  320. LOG_E("ERROR: file size error!");
  321. break;
  322. }
  323. ptr = (void *)((uint8_t *)ptr + (count * size));
  324. nmemb -= count;
  325. read_block += count;
  326. }
  327. return read_block;
  328. }
/* Symbol-table entry as read from the .dynsym section, using the
 * width-neutral Elf_* typedefs from lwp_elf.h. Field order follows this
 * declaration; NOTE(review): native Elf64_Sym orders st_info/st_other/
 * st_shndx before st_value — confirm the Elf_* typedefs account for this. */
typedef struct
{
    Elf_Word st_name;       /* name: offset into the string table */
    Elf_Addr st_value;      /* symbol value (address) */
    Elf_Word st_size;       /* size of the object */
    unsigned char st_info;  /* binding and type */
    unsigned char st_other; /* visibility */
    Elf_Half st_shndx;      /* defining section index */
} Elf_sym;
  338. #ifdef ARCH_MM_MMU
  339. struct map_range
  340. {
  341. void *start;
  342. size_t size;
  343. };
  344. static void expand_map_range(struct map_range *m, void *start, size_t size)
  345. {
  346. if (!m->start)
  347. {
  348. m->start = start;
  349. m->size = size;
  350. }
  351. else
  352. {
  353. void *end = (void *)((char*)start + size);
  354. void *mend = (void *)((char*)m->start + m->size);
  355. if (m->start > start)
  356. {
  357. m->start = start;
  358. }
  359. if (mend < end)
  360. {
  361. mend = end;
  362. }
  363. m->size = (char *)mend - (char *)m->start;
  364. }
  365. }
  366. static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
  367. {
  368. void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
  369. void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  370. void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
  371. void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  372. if (m1->size)
  373. {
  374. if (m1_start < (void *)USER_LOAD_VADDR)
  375. {
  376. return -1;
  377. }
  378. if (m1_start > (void *)USER_STACK_VSTART)
  379. {
  380. return -1;
  381. }
  382. if (m1_end < (void *)USER_LOAD_VADDR)
  383. {
  384. return -1;
  385. }
  386. if (m1_end > (void *)USER_STACK_VSTART)
  387. {
  388. return -1;
  389. }
  390. }
  391. if (m2->size)
  392. {
  393. if (m2_start < (void *)USER_LOAD_VADDR)
  394. {
  395. return -1;
  396. }
  397. if (m2_start > (void *)USER_STACK_VSTART)
  398. {
  399. return -1;
  400. }
  401. if (m2_end < (void *)USER_LOAD_VADDR)
  402. {
  403. return -1;
  404. }
  405. if (m2_end > (void *)USER_STACK_VSTART)
  406. {
  407. return -1;
  408. }
  409. }
  410. if ((m1->size != 0) && (m2->size != 0))
  411. {
  412. if (m1_start < m2_start)
  413. {
  414. if (m1_end > m2_start)
  415. {
  416. return -1;
  417. }
  418. }
  419. else /* m2_start <= m1_start */
  420. {
  421. if (m2_end > m1_start)
  422. {
  423. return -1;
  424. }
  425. }
  426. }
  427. return 0;
  428. }
  429. #endif
/**
 * Load a statically linked (or PIE) ELF image from @fd into process @lwp.
 *
 * Steps: validate magic/class/version/type; bail out with 1 when a
 * PT_DYNAMIC segment exists (caller then loads via ld.so); build the aux
 * vector (AT_PAGESZ/AT_RANDOM/AT_PHDR/AT_PHNUM/AT_PHENT); determine the
 * load offset (fixed @load_addr, mapped text/data ranges with MMU, or a
 * heap block without MMU); copy PT_LOAD segments and zero their BSS tails;
 * finally, for ET_DYN images, apply relocations (.rel.dyn/.got/.dynsym).
 *
 * @param fd        open file descriptor positioned anywhere (seeks itself).
 * @param len       total file length in bytes.
 * @param lwp       target process; text_entry/text_size (and load_off
 *                  without MMU) are filled in.
 * @param load_addr fixed load base, or NULL to choose one.
 * @param aux       aux vector to populate (item[1..5]).
 * @return RT_EOK on success, 1 when the image needs the dynamic loader,
 *         negative error code otherwise.
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef ARCH_MM_MMU
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;
#endif
    if (len < sizeof eheader)
    {
        LOG_E("len < sizeof eheader!");
        return -RT_ERROR;
    }
    /* --- header validation --- */
    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        LOG_E("elf_magic not same, magic:0x%x!", magic);
        return -RT_ERROR;
    }
    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);
#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit (EI_CLASS != ELFCLASS32) */
        LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit (EI_CLASS != ELFCLASS64) */
        LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#endif
    if (eheader.e_ident[6] != 1)
    { /* ver not 1 (EI_VERSION) */
        LOG_E("elf Version not 1,ver:%d!", eheader.e_ident[6]);
        return -RT_ERROR;
    }
    if ((eheader.e_type != ET_DYN)
#ifdef ARCH_MM_MMU
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
        return -RT_ERROR;
    }
#ifdef ARCH_MM_MMU
    {
        /* a PT_DYNAMIC segment means the dynamic loader must handle it */
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);
            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif
    if (eheader.e_entry != 0)
    {
        /* only the fixed app / ldso bases are accepted as entry points */
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalidate */
            LOG_E("elf entry is invalidate, entry:0x%x!", eheader.e_entry);
            return -RT_ERROR;
        }
    }
    { /* load aux: copy the program-header table where user space can see it
         and fill the aux vector entries derived from the ELF header */
        uint8_t *process_header;
        size_t process_header_size;
        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef ARCH_MM_MMU
        /* table plus the 16-byte AT_RANDOM blob must fit the page below */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            LOG_E("process_header_size too big, size:0x%x!", process_header_size);
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            LOG_E("lwp map user failed!");
            return -RT_ERROR;
        }
        pa = lwp_v2p(lwp, va);
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif
        aux->item[1].key = AT_PAGESZ;
#ifdef ARCH_MM_MMU
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif
        /* AT_RANDOM: 16 bytes expected by libc; seeded from the tick count
         * (NOTE(review): not cryptographically random) */
        aux->item[2].key = AT_RANDOM;
        {
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef ARCH_MM_MMU
            uint8_t *krandom;
            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)lwp_v2p(lwp, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }
        aux->item[3].key = AT_PHDR;
#ifdef ARCH_MM_MMU
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }
    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef ARCH_MM_MMU
    else
    {
        /* map user: derive text (read-only PROGBITS) and data (everything
         * else allocatable) address ranges from the section headers */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }
        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }
        if (user_area[0].start == NULL)
        {
            /* DYN: sections based at 0 — rebase to the fixed user load address */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }
        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }
        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        /* no MMU: compute the extent of all allocatable sections and grab
         * one contiguous heap block to hold the image */
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }
        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);
    /* --- copy PT_LOAD segments into place --- */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);
        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
                return -RT_ERROR;
            }
            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef ARCH_MM_MMU
            {
                /* copy page-by-page through the kernel alias of each frame,
                 * flushing dcache so the new text is visible to ifetch */
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;
                /* NOTE(review): adds load_addr (not load_off) — identical
                 * when load_addr was given; confirm for the DYN path where
                 * load_off is USER_LOAD_VADDR but load_addr is NULL */
                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                while (size)
                {
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);
            if (pheader.p_filesz < pheader.p_memsz)
            {
                /* zero the BSS tail (memsz beyond filesz) */
#ifdef ARCH_MM_MMU
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;
                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }
    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);
        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }
        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);
        /* locate .got / .rel.dyn / .dynsym by name */
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }
        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef ARCH_MM_MMU
        arch_elf_reloc(lwp->aspace, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
        /* make the patched text coherent before it is executed */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);
_exit:
    /* common cleanup for all error paths taken via check_off/check_read */
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  869. #endif /* ARCH_MM_MMU */
  870. rt_weak int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  871. {
  872. uint8_t *ptr;
  873. int ret = -1;
  874. int len;
  875. int fd = -1;
  876. /* check file name */
  877. RT_ASSERT(filename != RT_NULL);
  878. /* check lwp control block */
  879. RT_ASSERT(lwp != RT_NULL);
  880. /* copy file name to process name */
  881. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  882. if (load_addr != RT_NULL)
  883. {
  884. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  885. ptr = load_addr;
  886. }
  887. else
  888. {
  889. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  890. ptr = RT_NULL;
  891. }
  892. fd = open(filename, O_BINARY | O_RDONLY, 0);
  893. if (fd < 0)
  894. {
  895. LOG_E("ERROR: Can't open elf file %s!", filename);
  896. goto out;
  897. }
  898. len = lseek(fd, 0, SEEK_END);
  899. if (len < 0)
  900. {
  901. LOG_E("ERROR: File %s size error!", filename);
  902. goto out;
  903. }
  904. lseek(fd, 0, SEEK_SET);
  905. ret = load_elf(fd, len, lwp, ptr, aux);
  906. if ((ret != RT_EOK) && (ret != 1))
  907. {
  908. LOG_E("lwp load ret = %d", ret);
  909. }
  910. out:
  911. if (fd > 0)
  912. {
  913. close(fd);
  914. }
  915. return ret;
  916. }
  917. /* lwp thread clean up */
/* lwp thread clean up */
/**
 * Thread-exit hook for lwp threads (installed as tid->cleanup).
 *
 * Under a disabled-interrupt critical section it returns the thread id to
 * the tid pool, unlinks the thread from its process' sibling list, and
 * detaches its per-thread signal state; afterwards it drops one reference
 * on the owning process.
 *
 * @param tid exiting thread; a NULL tid is logged and ignored.
 */
void lwp_cleanup(struct rt_thread *tid)
{
    rt_base_t level;
    struct rt_lwp *lwp;
    if (tid == NULL)
    {
        LOG_I("%s: invalid parameter tid == NULL", __func__);
        return;
    }
    else
        LOG_D("cleanup thread: %s, stack_addr: 0x%x", tid->parent.name, tid->stack_addr);
    /* the tid/sibling/signal teardown must be atomic w.r.t. the scheduler */
    level = rt_hw_interrupt_disable();
    lwp = (struct rt_lwp *)tid->lwp;
    /* lwp thread cleanup */
    lwp_tid_put(tid->tid);
    rt_list_remove(&tid->sibling);
    lwp_thread_signal_detach(&tid->signal);
    rt_hw_interrupt_enable(level);
    /* tty will be release in lwp_ref_dec() if ref is cleared */
    lwp_ref_dec(lwp);
    return;
}
  940. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  941. {
  942. struct dfs_file *d;
  943. struct dfs_fdtable *lwp_fdt;
  944. lwp_fdt = &lwp->fdt;
  945. /* init 4 fds */
  946. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  947. if (lwp_fdt->fds)
  948. {
  949. lwp_fdt->maxfd = 4;
  950. d = fd_get(0);
  951. fd_associate(lwp_fdt, 0, d);
  952. d = fd_get(1);
  953. fd_associate(lwp_fdt, 1, d);
  954. d = fd_get(2);
  955. fd_associate(lwp_fdt, 2, d);
  956. }
  957. return;
  958. }
/* First kernel-side code run by a new lwp's main thread: installs the
 * cleanup hook, optionally patches a debug breakpoint over the first user
 * instruction, then drops to user mode (never returns). */
static void _lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

    if (lwp->debug)
    {
        /* save the first user instruction, then overwrite it with the
         * debugger's breakpoint instruction; flush dcache and invalidate
         * icache so the core fetches the patched word */
        lwp->bak_first_inst = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }

    /**
     * without ASID support, it will be a special case when trying to run application
     * and exit multiple times and a same page frame allocated to it bound to
     * different text segment. Then we are in a situation where icache contains
     * out-of-dated data and must be handle by the running core itself.
     * with ASID support, this should be a rare case that ASID & page frame both
     * identical to previous running application.
     *
     * For a new application loaded into memory, icache are seen as empty. And there
     * should be nothing in the icache entry to match. So this icache invalidation
     * operation should have barely influence.
     */
    rt_hw_icache_invalidate_all();

#ifdef ARCH_MM_MMU
    /* user stack top is fixed at USER_VADDR region; kernel stack passed too */
    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, (char *)tid->stack_addr + tid->stack_size);
#else
    /* no MMU: the data segment end doubles as the user stack top */
    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
  993. struct rt_lwp *lwp_self(void)
  994. {
  995. rt_thread_t tid;
  996. tid = rt_thread_self();
  997. if (tid)
  998. {
  999. return (struct rt_lwp *)tid->lwp;
  1000. }
  1001. return RT_NULL;
  1002. }
  1003. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  1004. {
  1005. int result;
  1006. rt_base_t level;
  1007. struct rt_lwp *lwp;
  1008. char *thread_name;
  1009. char *argv_last = argv[argc - 1];
  1010. int bg = 0;
  1011. struct process_aux *aux;
  1012. int tid = 0;
  1013. int ret;
  1014. if (filename == RT_NULL)
  1015. {
  1016. return -RT_ERROR;
  1017. }
  1018. if (access(filename, X_OK) != 0)
  1019. {
  1020. return -EACCES;
  1021. }
  1022. lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID);
  1023. if (lwp == RT_NULL)
  1024. {
  1025. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  1026. return -RT_ENOMEM;
  1027. }
  1028. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  1029. if ((tid = lwp_tid_get()) == 0)
  1030. {
  1031. lwp_ref_dec(lwp);
  1032. return -ENOMEM;
  1033. }
  1034. #ifdef ARCH_MM_MMU
  1035. if (lwp_user_space_init(lwp, 0) != 0)
  1036. {
  1037. lwp_tid_put(tid);
  1038. lwp_ref_dec(lwp);
  1039. return -ENOMEM;
  1040. }
  1041. #endif
  1042. if (argv_last[0] == '&' && argv_last[1] == '\0')
  1043. {
  1044. argc--;
  1045. bg = 1;
  1046. }
  1047. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  1048. {
  1049. lwp_tid_put(tid);
  1050. lwp_ref_dec(lwp);
  1051. return -ENOMEM;
  1052. }
  1053. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  1054. #ifdef ARCH_MM_MMU
  1055. if (result == 1)
  1056. {
  1057. /* dynmaic */
  1058. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  1059. result = load_ldso(lwp, filename, argv, envp);
  1060. }
  1061. #endif /* ARCH_MM_MMU */
  1062. if (result == RT_EOK)
  1063. {
  1064. rt_thread_t thread = RT_NULL;
  1065. rt_uint32_t priority = 25, tick = 200;
  1066. lwp_copy_stdio_fdt(lwp);
  1067. /* obtain the base name */
  1068. thread_name = strrchr(filename, '/');
  1069. thread_name = thread_name ? thread_name + 1 : filename;
  1070. #ifndef ARCH_MM_MMU
  1071. struct lwp_app_head *app_head = lwp->text_entry;
  1072. if (app_head->priority)
  1073. {
  1074. priority = app_head->priority;
  1075. }
  1076. if (app_head->tick)
  1077. {
  1078. tick = app_head->tick;
  1079. }
  1080. #endif /* not defined ARCH_MM_MMU */
  1081. thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
  1082. LWP_TASK_STACK_SIZE, priority, tick);
  1083. if (thread != RT_NULL)
  1084. {
  1085. struct rt_lwp *self_lwp;
  1086. thread->tid = tid;
  1087. lwp_tid_set_thread(tid, thread);
  1088. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
  1089. (rt_size_t)thread->stack_addr + thread->stack_size);
  1090. level = rt_hw_interrupt_disable();
  1091. self_lwp = lwp_self();
  1092. if (self_lwp)
  1093. {
  1094. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1095. lwp->__pgrp = tid;
  1096. lwp->session = self_lwp->session;
  1097. /* lwp add to children link */
  1098. lwp->sibling = self_lwp->first_child;
  1099. self_lwp->first_child = lwp;
  1100. lwp->parent = self_lwp;
  1101. }
  1102. else
  1103. {
  1104. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1105. lwp->__pgrp = tid;
  1106. }
  1107. if (!bg)
  1108. {
  1109. if (lwp->session == -1)
  1110. {
  1111. struct tty_struct *tty = RT_NULL;
  1112. struct rt_lwp *old_lwp;
  1113. tty = (struct tty_struct *)console_tty_get();
  1114. old_lwp = tty->foreground;
  1115. if (old_lwp)
  1116. {
  1117. rt_mutex_take(&tty->lock, RT_WAITING_FOREVER);
  1118. ret = tty_push(&tty->head, old_lwp);
  1119. rt_mutex_release(&tty->lock);
  1120. if (ret < 0)
  1121. {
  1122. lwp_tid_put(tid);
  1123. lwp_ref_dec(lwp);
  1124. LOG_E("malloc fail!\n");
  1125. return -ENOMEM;
  1126. }
  1127. }
  1128. lwp->tty = tty;
  1129. lwp->tty->pgrp = lwp->__pgrp;
  1130. lwp->tty->session = lwp->session;
  1131. lwp->tty->foreground = lwp;
  1132. tcgetattr(1, &stdin_termios);
  1133. old_stdin_termios = stdin_termios;
  1134. stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
  1135. tcsetattr(1, 0, &stdin_termios);
  1136. }
  1137. else
  1138. {
  1139. if (self_lwp != RT_NULL)
  1140. {
  1141. rt_mutex_take(&self_lwp->tty->lock, RT_WAITING_FOREVER);
  1142. ret = tty_push(&self_lwp->tty->head, self_lwp);
  1143. rt_mutex_release(&self_lwp->tty->lock);
  1144. if (ret < 0)
  1145. {
  1146. lwp_tid_put(tid);
  1147. lwp_ref_dec(lwp);
  1148. LOG_E("malloc fail!\n");
  1149. return -ENOMEM;
  1150. }
  1151. lwp->tty = self_lwp->tty;
  1152. lwp->tty->pgrp = lwp->__pgrp;
  1153. lwp->tty->session = lwp->session;
  1154. lwp->tty->foreground = lwp;
  1155. }
  1156. else
  1157. {
  1158. lwp->tty = RT_NULL;
  1159. }
  1160. }
  1161. }
  1162. else
  1163. {
  1164. lwp->background = RT_TRUE;
  1165. }
  1166. thread->lwp = lwp;
  1167. #ifndef ARCH_MM_MMU
  1168. struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
  1169. thread->user_stack = app_head->stack_offset ?
  1170. (void *)(app_head->stack_offset -
  1171. app_head->data_offset +
  1172. (uint32_t)lwp->data_entry) : RT_NULL;
  1173. thread->user_stack_size = app_head->stack_size;
  1174. /* init data area */
  1175. rt_memset(lwp->data_entry, 0, lwp->data_size);
  1176. /* init user stack */
  1177. rt_memset(thread->user_stack, '#', thread->user_stack_size);
  1178. #endif /* not defined ARCH_MM_MMU */
  1179. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  1180. if (debug && rt_dbg_ops)
  1181. {
  1182. lwp->debug = debug;
  1183. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
  1184. }
  1185. rt_hw_interrupt_enable(level);
  1186. rt_thread_startup(thread);
  1187. return lwp_to_pid(lwp);
  1188. }
  1189. }
  1190. lwp_tid_put(tid);
  1191. lwp_ref_dec(lwp);
  1192. return -RT_ERROR;
  1193. }
#ifdef RT_USING_MUSLLIBC
/* musl libc owns the environment table; reference its definition */
extern char **__environ;
#else
/* no libc-provided environment: supply an empty one for exec() */
char **__environ = 0;
#endif
  1199. pid_t exec(char *filename, int debug, int argc, char **argv)
  1200. {
  1201. setenv("OS", "RT-Thread", 1);
  1202. return lwp_execve(filename, debug, argc, argv, __environ);
  1203. }
  1204. #ifdef ARCH_MM_MMU
  1205. void lwp_user_setting_save(rt_thread_t thread)
  1206. {
  1207. if (thread)
  1208. {
  1209. thread->thread_idr = arch_get_tidr();
  1210. }
  1211. }
  1212. void lwp_user_setting_restore(rt_thread_t thread)
  1213. {
  1214. if (!thread)
  1215. {
  1216. return;
  1217. }
  1218. #if !defined(ARCH_RISCV64)
  1219. /* tidr will be set in RESTORE_ALL in risc-v */
  1220. arch_set_tidr(thread->thread_idr);
  1221. #endif
  1222. if (rt_dbg_ops)
  1223. {
  1224. struct rt_lwp *l = (struct rt_lwp *)thread->lwp;
  1225. if (l != 0)
  1226. {
  1227. rt_hw_set_process_id((size_t)l->pid);
  1228. }
  1229. else
  1230. {
  1231. rt_hw_set_process_id(0);
  1232. }
  1233. if (l && l->debug)
  1234. {
  1235. uint32_t step_type = 0;
  1236. step_type = dbg_step_type();
  1237. if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
  1238. {
  1239. dbg_activate_step();
  1240. }
  1241. else
  1242. {
  1243. dbg_deactivate_step();
  1244. }
  1245. }
  1246. }
  1247. }
  1248. #endif /* ARCH_MM_MMU */
  1249. void lwp_uthread_ctx_save(void *ctx)
  1250. {
  1251. rt_thread_t thread;
  1252. thread = rt_thread_self();
  1253. thread->user_ctx.ctx = ctx;
  1254. }
  1255. void lwp_uthread_ctx_restore(void)
  1256. {
  1257. rt_thread_t thread;
  1258. thread = rt_thread_self();
  1259. thread->user_ctx.ctx = RT_NULL;
  1260. }
  1261. void rt_update_process_times(void)
  1262. {
  1263. struct rt_thread *thread;
  1264. #ifdef RT_USING_SMP
  1265. struct rt_cpu* pcpu;
  1266. pcpu = rt_cpu_self();
  1267. #endif
  1268. thread = rt_thread_self();
  1269. if (!IS_USER_MODE(thread))
  1270. {
  1271. thread->user_time += 1;
  1272. #ifdef RT_USING_SMP
  1273. pcpu->cpu_stat.user += 1;
  1274. #endif
  1275. }
  1276. else
  1277. {
  1278. thread->system_time += 1;
  1279. #ifdef RT_USING_SMP
  1280. if (thread == pcpu->idle_thread)
  1281. {
  1282. pcpu->cpu_stat.idle += 1;
  1283. }
  1284. else
  1285. {
  1286. pcpu->cpu_stat.system += 1;
  1287. }
  1288. #endif
  1289. }
  1290. }