lwp.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
  9. * 2018-11-02 heyuanjie fix compile error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. * 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
  12. */
  13. #include <rthw.h>
  14. #include <rtthread.h>
  15. #include <dfs_posix.h>
  16. #include <lwp_elf.h>
  17. #ifndef RT_USING_DFS
  18. #error "lwp need file system(RT_USING_DFS)"
  19. #endif
  20. #include "lwp.h"
  21. #include "lwp_arch.h"
  22. #include "console.h"
  23. #define DBG_TAG "LWP"
  24. #define DBG_LVL DBG_WARNING
  25. #include <rtdbg.h>
  26. #ifdef RT_USING_USERSPACE
  27. #include <lwp_mm_area.h>
  28. #include <lwp_user_mm.h>
  29. #endif /* end of RT_USING_USERSPACE */
  30. static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
  31. #ifdef DFS_USING_WORKDIR
  32. extern char working_directory[];
  33. #endif
  34. static struct termios stdin_termios, old_stdin_termios;
  35. extern void lwp_user_entry(void *args, const void *text, void *ustack, void *k_stack);
  36. int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
  37. struct termios *get_old_termios(void)
  38. {
  39. return &old_stdin_termios;
  40. }
  41. void lwp_setcwd(char *buf)
  42. {
  43. struct rt_lwp *lwp = RT_NULL;
  44. if(strlen(buf) >= DFS_PATH_MAX)
  45. {
  46. rt_kprintf("buf too long!\n");
  47. return ;
  48. }
  49. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  50. if (lwp)
  51. {
  52. rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
  53. }
  54. else
  55. {
  56. rt_strncpy(working_directory, buf, DFS_PATH_MAX);
  57. }
  58. return ;
  59. }
  60. char *lwp_getcwd(void)
  61. {
  62. char *dir_buf = RT_NULL;
  63. struct rt_lwp *lwp = RT_NULL;
  64. lwp = (struct rt_lwp *)rt_thread_self()->lwp;
  65. if (lwp)
  66. {
  67. if(lwp->working_directory[0] != '/')
  68. {
  69. dir_buf = &working_directory[0];
  70. }
  71. else
  72. {
  73. dir_buf = &lwp->working_directory[0];
  74. }
  75. }
  76. else
  77. dir_buf = &working_directory[0];
  78. return dir_buf;
  79. }
  80. /**
  81. * RT-Thread light-weight process
  82. */
  83. void lwp_set_kernel_sp(uint32_t *sp)
  84. {
  85. rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
  86. }
/*
 * Return the kernel stack pointer for the current execution context.
 *
 * With RT_USING_USERSPACE the thread's saved sp is used directly.  Without
 * it, the value depends on whether a context switch was requested from
 * interrupt context: if so, the sp of the switched-from thread (recovered
 * from rt_interrupt_from_thread, which holds the address of that thread's
 * sp field) is used; otherwise the current thread's kernel_sp.
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef RT_USING_USERSPACE
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t* kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;
    if (rt_thread_switch_interrupt_flag)
    {
        /* rt_interrupt_from_thread points at the outgoing thread's sp
         * member; rt_container_of recovers the rt_thread from it. */
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}
  106. #ifdef RT_USING_USERSPACE
/*
 * Copy argc/argv/envp and the ELF auxiliary vector into one page mapped at
 * the top of the new process's user address space.
 *
 * Layout written into the page: argc, argv pointers, NULL, envp pointers,
 * NULL, aux entries, then the string data.  The page is written through its
 * kernel alias (args_k) while all pointers stored INTO the page are user
 * virtual addresses (based on args/str).
 *
 * Returns a kernel-visible pointer to the aux vector inside the page, or
 * RT_NULL if everything does not fit in ARCH_PAGE_SIZE or mapping fails.
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;           /* user-space cursor for string data */
    char *str_k;         /* kernel-space alias of the same cursor */
    char **new_argve;
    int i;
    int len;
    size_t *args_k;
    struct process_aux *aux;

    /* bytes needed for argv strings and their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);

    /* bytes needed for envp strings and slots; i counts env entries and is
     * reused below when computing the start of the string area */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }
    /* for aux */
    size += sizeof(struct process_aux);
    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }

    /* args = (int *)lwp_map_user(lwp, 0, size); */
    args = (int *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE), size, 0);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* kernel alias of the user page: physical address shifted by PV_OFFSET */
    args_k = (size_t *)rt_hw_mmu_v2p(&lwp->mmu_info, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);

    /* argc, argv[], 0, envp[], 0 , aux[] */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));

    new_argve = (char **)&args_k[1];
    args_k[0] = argc;

    /* copy argv strings; slots receive USER addresses (str), data is
     * written through the kernel alias (str_k) */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;
    i++;
    new_argve[i] = 0;
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;
    }
    i++;

    /* aux vector starts right after the envp NULL terminator */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    /* make the page visible to user space before the MMU switch */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);

    lwp->args = args;

    return aux;
}
  188. #else
/*
 * Non-RT_USING_USERSPACE variant: copy argc/argv/envp (and, with
 * ARCH_MM_MMU, the aux vector) into a kernel heap buffer.  No address
 * translation is needed because user and kernel share one address space.
 *
 * Returns a pointer to the aux area inside the buffer, or RT_NULL when
 * allocation fails.  Without ARCH_MM_MMU, lwp->args_length is also set so
 * the buffer can be freed later.
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;

    /* bytes for argv strings plus their pointer slots */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);

    /* bytes for envp strings plus slots; i keeps the env count for the
     * string-area offset computation below */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }

#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);

    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    str = (char*)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */

    new_argve = (char **)&args[1];
    args[0] = argc;

    /* copy argv strings and record their addresses */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;
    i++;
    new_argve[i] = 0;
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;
    }
#ifdef ARCH_MM_MMU
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    lwp->args = args;

    return aux;
#else
    lwp->args = args;
    lwp->args_length = size;

    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
  275. #endif
  276. #ifdef ARCH_MM_MMU
/* Bail out of the enclosing function (which must declare `int result` and
 * an `_exit:` cleanup label) when a file offset lies beyond the file. */
#define check_off(voff, vlen) \
do \
{ \
if (voff > vlen) \
{ \
result = -RT_ERROR; \
goto _exit; \
} \
} while (0)

/* Bail out via `result`/`_exit` when a read returned fewer items than
 * requested (truncated or corrupt ELF image). */
#define check_read(vrlen, vrlen_want) \
do \
{ \
if (vrlen < vrlen_want) \
{ \
result = -RT_ERROR; \
goto _exit; \
} \
} while (0)
  295. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  296. {
  297. size_t read_block = 0;
  298. while (nmemb)
  299. {
  300. size_t count;
  301. count = read(fd, ptr, size * nmemb) / size;
  302. if (count < nmemb)
  303. {
  304. LOG_E("ERROR: file size error!");
  305. break;
  306. }
  307. ptr = (void *)((uint8_t *)ptr + (count * size));
  308. nmemb -= count;
  309. read_block += count;
  310. }
  311. return read_block;
  312. }
/* Minimal ELF symbol-table entry as consumed by the relocator.
 * NOTE(review): field order matches Elf32_Sym; for 64-bit ELF the native
 * Elf64_Sym layout differs (st_info/st_other/st_shndx precede st_value) —
 * confirm against the arch-specific relocation code. */
typedef struct
{
Elf_Word st_name;
Elf_Addr st_value;
Elf_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf_Half st_shndx;
} Elf_sym;

/* Architecture-specific relocation entry point (defined elsewhere). */
#ifdef RT_USING_USERSPACE
void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
#else
void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
#endif
  327. #ifdef RT_USING_USERSPACE
  328. struct map_range
  329. {
  330. void *start;
  331. size_t size;
  332. };
  333. static void expand_map_range(struct map_range *m, void *start, size_t size)
  334. {
  335. if (!m->start)
  336. {
  337. m->start = start;
  338. m->size = size;
  339. }
  340. else
  341. {
  342. void *end = (void *)((char*)start + size);
  343. void *mend = (void *)((char*)m->start + m->size);
  344. if (m->start > start)
  345. {
  346. m->start = start;
  347. }
  348. if (mend < end)
  349. {
  350. mend = end;
  351. }
  352. m->size = (char *)mend - (char *)m->start;
  353. }
  354. }
  355. static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
  356. {
  357. void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
  358. void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  359. void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
  360. void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
  361. if (m1->size)
  362. {
  363. if (m1_start < (void *)USER_LOAD_VADDR)
  364. {
  365. return -1;
  366. }
  367. if (m1_start > (void *)USER_STACK_VSTART)
  368. {
  369. return -1;
  370. }
  371. if (m1_end < (void *)USER_LOAD_VADDR)
  372. {
  373. return -1;
  374. }
  375. if (m1_end > (void *)USER_STACK_VSTART)
  376. {
  377. return -1;
  378. }
  379. }
  380. if (m2->size)
  381. {
  382. if (m2_start < (void *)USER_LOAD_VADDR)
  383. {
  384. return -1;
  385. }
  386. if (m2_start > (void *)USER_STACK_VSTART)
  387. {
  388. return -1;
  389. }
  390. if (m2_end < (void *)USER_LOAD_VADDR)
  391. {
  392. return -1;
  393. }
  394. if (m2_end > (void *)USER_STACK_VSTART)
  395. {
  396. return -1;
  397. }
  398. }
  399. if ((m1->size != 0) && (m2->size != 0))
  400. {
  401. if (m1_start < m2_start)
  402. {
  403. if (m1_end > m2_start)
  404. {
  405. return -1;
  406. }
  407. }
  408. else /* m2_start <= m1_start */
  409. {
  410. if (m2_end > m1_start)
  411. {
  412. return -1;
  413. }
  414. }
  415. }
  416. return 0;
  417. }
  418. #endif
/*
 * Load an ELF image from open descriptor `fd` (file length `len`) into the
 * address space of `lwp`, filling in `aux` (AT_PAGESZ/AT_RANDOM/AT_PHDR/
 * AT_PHNUM/AT_PHENT) along the way.
 *
 * Returns RT_EOK on success, 1 when the image needs the dynamic loader
 * (a PT_DYNAMIC segment was found — the caller loads ld.so instead), and
 * a negative error code otherwise.
 *
 * When `load_addr` is non-NULL the image is placed there; otherwise the
 * load base is derived from the section headers (userspace build) or a
 * heap allocation (non-MMU build).
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef RT_USING_USERSPACE
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;
    rt_mmu_info *m_info = &lwp->mmu_info;
#endif

    if (len < sizeof eheader)
    {
        return -RT_ERROR;
    }

    /* verify the ELF magic */
    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        return -RT_ERROR;
    }

    /* read the ELF header and check class/version/type */
    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);
#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        return -RT_ERROR;
    }
#endif
    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        return -RT_ERROR;
    }
    if ((eheader.e_type != ET_DYN)
#ifdef RT_USING_USERSPACE
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        return -RT_ERROR;
    }

#ifdef RT_USING_USERSPACE
    {
        /* a PT_DYNAMIC segment means the image needs ld.so; hand it back */
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);
            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif

    /* only the two well-known entry addresses are accepted */
    if (eheader.e_entry != 0)
    {
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalid */
            return -RT_ERROR;
        }
    }

    { /* load aux: copy the program headers where user code can see them */
        uint8_t *process_header;
        size_t process_header_size;

        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef RT_USING_USERSPACE
        /* the tail of the page is reserved for the AT_RANDOM bytes */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            return -RT_ERROR;
        }
        pa = rt_hw_mmu_v2p(m_info, va);
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef RT_USING_USERSPACE
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif

        aux->item[1].key = AT_PAGESZ;
#ifdef RT_USING_USERSPACE
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif
        aux->item[2].key = AT_RANDOM;
        {
            /* NOTE(review): rt_tick_get() is a weak entropy source for
             * AT_RANDOM — acceptable here, but not cryptographic. */
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef RT_USING_USERSPACE
            uint8_t *krandom;

            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)rt_hw_mmu_v2p(m_info, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }
        aux->item[3].key = AT_PHDR;
#ifdef RT_USING_USERSPACE
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef RT_USING_USERSPACE
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }

    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef RT_USING_USERSPACE
    else
    {
        /* map user: walk section headers to size the text and data areas */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                /* read-only PROGBITS goes to text, writable to data */
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }

        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }

        if (user_area[0].start == NULL)
        {
            /* DYN: rebase both areas to the fixed user load address */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }

        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }

        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (int)(i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        /* non-MMU: compute the image extent and allocate it on the heap */
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;

        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }

        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);

    /* copy every PT_LOAD segment into place and zero its bss tail */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);

        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                return -RT_ERROR;
            }

            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef RT_USING_USERSPACE
            {
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;

                /* NOTE(review): this uses load_addr, not load_off — when
                 * load_addr is NULL and the image was rebased to
                 * USER_LOAD_VADDR above, the two differ; confirm intended. */
                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                /* copy page by page through the kernel alias of each page */
                while (size)
                {
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);

            if (pheader.p_filesz < pheader.p_memsz)
            {
#ifdef RT_USING_USERSPACE
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off; /* in-page offset; shadows the outer `off` */

                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }

    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);

        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }

        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);

        /* locate .got, .rel.dyn and .dynsym by name */
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }

        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef RT_USING_USERSPACE
        lwp_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        lwp_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);

        /* make the patched text visible to instruction fetch */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);

_exit:
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  848. #endif /* ARCH_MM_MMU */
  849. RT_WEAK int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  850. {
  851. uint8_t *ptr;
  852. int ret = -1;
  853. int len;
  854. int fd = -1;
  855. /* check file name */
  856. RT_ASSERT(filename != RT_NULL);
  857. /* check lwp control block */
  858. RT_ASSERT(lwp != RT_NULL);
  859. /* copy file name to process name */
  860. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  861. if (load_addr != RT_NULL)
  862. {
  863. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  864. ptr = load_addr;
  865. }
  866. else
  867. {
  868. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  869. ptr = RT_NULL;
  870. }
  871. fd = open(filename, O_BINARY | O_RDONLY, 0);
  872. if (fd < 0)
  873. {
  874. LOG_E("ERROR: Can't open elf file %s!", filename);
  875. goto out;
  876. }
  877. len = lseek(fd, 0, SEEK_END);
  878. if (len < 0)
  879. {
  880. LOG_E("ERROR: File %s size error!", filename);
  881. goto out;
  882. }
  883. lseek(fd, 0, SEEK_SET);
  884. ret = load_elf(fd, len, lwp, ptr, aux);
  885. if ((ret != RT_EOK) && (ret != 1))
  886. {
  887. LOG_E("lwp load ret = %d", ret);
  888. }
  889. out:
  890. if (fd > 0)
  891. {
  892. close(fd);
  893. }
  894. return ret;
  895. }
/*
 * Thread cleanup hook installed on every lwp thread (see lwp_thread_entry).
 *
 * Releases the thread's tid and unlinks it from the process's sibling list
 * under an interrupt lock, then drops one reference on the lwp (which frees
 * the process when the count reaches zero).  A NULL tid is ignored.
 */
void lwp_cleanup(struct rt_thread *tid)
{
    rt_base_t level;
    struct rt_lwp *lwp;

    if (tid == NULL)
    {
        return;
    }
    LOG_I("cleanup thread: %s, stack_addr: %08X", tid->name, tid->stack_addr);

    /* tid release and list removal must be atomic w.r.t. the scheduler */
    level = rt_hw_interrupt_disable();
    lwp = (struct rt_lwp *)tid->lwp;
    lwp_tid_put(tid->tid);
    rt_list_remove(&tid->sibling);
    rt_hw_interrupt_enable(level);

    lwp_ref_dec(lwp);

    return;
}
  913. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  914. {
  915. struct dfs_fd *d;
  916. struct dfs_fdtable *lwp_fdt;
  917. lwp_fdt = &lwp->fdt;
  918. /* init 4 fds */
  919. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  920. if (lwp_fdt->fds)
  921. {
  922. lwp_fdt->maxfd = 4;
  923. d = fd_get(0);
  924. fd_associate(lwp_fdt, 0, d);
  925. d = fd_get(1);
  926. fd_associate(lwp_fdt, 1, d);
  927. d = fd_get(2);
  928. fd_associate(lwp_fdt, 2, d);
  929. }
  930. return;
  931. }
/*
 * Kernel-side entry for the first thread of an lwp: installs the cleanup
 * hook, optionally plants a breakpoint at the program entry for the
 * debugger, then drops to user mode via lwp_user_entry.
 */
static void lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

    if (lwp->debug)
    {
        /* save the first instruction and replace it with the debug trap;
         * caches are synced so the CPU fetches the patched instruction */
        lwp->bak_first_ins = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }

#ifdef ARCH_MM_MMU
    /* user stack at the fixed top-of-stack; kernel stack is this thread's */
    lwp_user_entry(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
#else
    lwp_user_entry(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}
  953. struct rt_lwp *lwp_self(void)
  954. {
  955. rt_thread_t tid;
  956. tid = rt_thread_self();
  957. if (tid)
  958. {
  959. return (struct rt_lwp *)tid->lwp;
  960. }
  961. return RT_NULL;
  962. }
  963. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  964. {
  965. int result;
  966. rt_base_t level;
  967. struct rt_lwp *lwp;
  968. char *thread_name;
  969. char *argv_last = argv[argc - 1];
  970. int bg = 0;
  971. struct process_aux *aux;
  972. int tid = 0;
  973. if (filename == RT_NULL)
  974. {
  975. return -RT_ERROR;
  976. }
  977. lwp = lwp_new();
  978. if (lwp == RT_NULL)
  979. {
  980. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  981. return -RT_ENOMEM;
  982. }
  983. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  984. if ((tid = lwp_tid_get()) == 0)
  985. {
  986. lwp_ref_dec(lwp);
  987. return -ENOMEM;
  988. }
  989. #ifdef RT_USING_USERSPACE
  990. if (lwp_user_space_init(lwp) != 0)
  991. {
  992. lwp_tid_put(tid);
  993. lwp_ref_dec(lwp);
  994. return -ENOMEM;
  995. }
  996. #endif
  997. if (argv_last[0] == '&' && argv_last[1] == '\0')
  998. {
  999. argc--;
  1000. bg = 1;
  1001. }
  1002. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  1003. {
  1004. lwp_tid_put(tid);
  1005. lwp_ref_dec(lwp);
  1006. return -ENOMEM;
  1007. }
  1008. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  1009. #ifdef ARCH_MM_MMU
  1010. if (result == 1)
  1011. {
  1012. /* dynmaic */
  1013. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  1014. result = load_ldso(lwp, filename, argv, envp);
  1015. }
  1016. #endif /* ARCH_MM_MMU */
  1017. if (result == RT_EOK)
  1018. {
  1019. rt_thread_t thread = RT_NULL;
  1020. rt_uint32_t priority = 25, tick = 200;
  1021. lwp_copy_stdio_fdt(lwp);
  1022. /* obtain the base name */
  1023. thread_name = strrchr(filename, '/');
  1024. thread_name = thread_name ? thread_name + 1 : filename;
  1025. #ifndef ARCH_MM_MMU
  1026. struct lwp_app_head *app_head = lwp->text_entry;
  1027. if (app_head->priority)
  1028. {
  1029. priority = app_head->priority;
  1030. }
  1031. if (app_head->tick)
  1032. {
  1033. tick = app_head->tick;
  1034. }
  1035. #endif /* not defined ARCH_MM_MMU */
  1036. thread = rt_thread_create(thread_name, lwp_thread_entry, RT_NULL,
  1037. LWP_TASK_STACK_SIZE, priority, tick);
  1038. if (thread != RT_NULL)
  1039. {
  1040. struct rt_lwp *self_lwp;
  1041. thread->tid = tid;
  1042. lwp_tid_set_thread(tid, thread);
  1043. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_uint32_t)thread->stack_addr,
  1044. (rt_uint32_t)thread->stack_addr + thread->stack_size);
  1045. level = rt_hw_interrupt_disable();
  1046. self_lwp = lwp_self();
  1047. if (self_lwp)
  1048. {
  1049. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1050. lwp->__pgrp = tid;
  1051. lwp->session = self_lwp->session;
  1052. /* lwp add to children link */
  1053. lwp->sibling = self_lwp->first_child;
  1054. self_lwp->first_child = lwp;
  1055. lwp->parent = self_lwp;
  1056. }
  1057. else
  1058. {
  1059. //lwp->tgroup_leader = &thread; //add thread group leader for lwp
  1060. lwp->__pgrp = tid;
  1061. }
  1062. if (!bg)
  1063. {
  1064. if (lwp->session == -1)
  1065. {
  1066. struct tty_struct *tty = RT_NULL;
  1067. tty = (struct tty_struct *)console_tty_get();
  1068. lwp->tty = tty;
  1069. lwp->tty->pgrp = lwp->__pgrp;
  1070. lwp->tty->session = lwp->session;
  1071. lwp->tty->foreground = lwp;
  1072. tcgetattr(1, &stdin_termios);
  1073. old_stdin_termios = stdin_termios;
  1074. stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
  1075. tcsetattr(1, 0, &stdin_termios);
  1076. }
  1077. else
  1078. {
  1079. if (self_lwp != RT_NULL)
  1080. {
  1081. lwp->tty = self_lwp->tty;
  1082. lwp->tty->pgrp = lwp->__pgrp;
  1083. lwp->tty->session = lwp->session;
  1084. lwp->tty->foreground = lwp;
  1085. }
  1086. else
  1087. {
  1088. lwp->tty = RT_NULL;
  1089. }
  1090. }
  1091. }
  1092. thread->lwp = lwp;
  1093. #ifndef ARCH_MM_MMU
  1094. struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
  1095. thread->user_stack = app_head->stack_offset ?
  1096. (void *)(app_head->stack_offset -
  1097. app_head->data_offset +
  1098. (uint32_t)lwp->data_entry) : RT_NULL;
  1099. thread->user_stack_size = app_head->stack_size;
  1100. /* init data area */
  1101. rt_memset(lwp->data_entry, 0, lwp->data_size);
  1102. /* init user stack */
  1103. rt_memset(thread->user_stack, '#', thread->user_stack_size);
  1104. #endif /* not defined ARCH_MM_MMU */
  1105. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  1106. if (debug && rt_dbg_ops)
  1107. {
  1108. lwp->debug = debug;
  1109. rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void*)0);
  1110. }
  1111. rt_hw_interrupt_enable(level);
  1112. rt_thread_startup(thread);
  1113. return lwp_to_pid(lwp);
  1114. }
  1115. }
  1116. lwp_tid_put(tid);
  1117. lwp_ref_dec(lwp);
  1118. return -RT_ERROR;
  1119. }
  1120. #ifdef RT_USING_MUSL
  1121. extern char **__environ;
  1122. #else
  1123. char __environ = 0;
  1124. #endif
  1125. pid_t exec(char *filename, int debug, int argc, char **argv)
  1126. {
  1127. return lwp_execve(filename, debug, argc, argv, __environ);
  1128. }
  1129. #ifdef ARCH_MM_MMU
  1130. void lwp_user_setting_save(rt_thread_t thread)
  1131. {
  1132. if (thread)
  1133. {
  1134. thread->thread_idr = rt_cpu_get_thread_idr();
  1135. }
  1136. }
  1137. void lwp_user_setting_restore(rt_thread_t thread)
  1138. {
  1139. if (!thread)
  1140. {
  1141. return;
  1142. }
  1143. rt_cpu_set_thread_idr(thread->thread_idr);
  1144. if (rt_dbg_ops)
  1145. {
  1146. struct rt_lwp *l = (struct rt_lwp *)thread->lwp;
  1147. if (l != 0)
  1148. {
  1149. set_process_id((size_t)l->pid);
  1150. }
  1151. else
  1152. {
  1153. set_process_id(0);
  1154. }
  1155. if (l && l->debug)
  1156. {
  1157. uint32_t step_type = 0;
  1158. step_type = dbg_step_type();
  1159. if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
  1160. {
  1161. dbg_activate_step();
  1162. }
  1163. else
  1164. {
  1165. dbg_deactivate_step();
  1166. }
  1167. }
  1168. }
  1169. }
  1170. #endif /* ARCH_MM_MMU */