lwp.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-12     Bernard      first version
 * 2018-11-02     heyuanjie    fix compile error in iar
 * 2021-02-03     lizhirui     add 64-bit arch support and riscv64 arch support
 * 2021-08-26     linzhenxing  add lwp_setcwd\lwp_getcwd
 */

#include <rthw.h>
#include <rtthread.h>

#include <dfs_posix.h>
#include <lwp_elf.h>
#include <lwp_console.h>

#ifndef RT_USING_DFS
#error "lwp needs a file system (RT_USING_DFS)"
#endif
#include "lwp.h"
#include "lwp_arch.h"

#define DBG_TAG    "LWP"
#define DBG_LVL    DBG_WARNING
#include <rtdbg.h>

#ifdef RT_USING_USERSPACE
#ifdef RT_USING_GDBSERVER
#include <hw_breakpoint.h>
#include <lwp_gdbserver.h>
#endif
#include <lwp_mm_area.h>
#include <lwp_user_mm.h>
#endif

static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};

#ifdef DFS_USING_WORKDIR
extern char working_directory[];
#endif

extern void lwp_user_entry(void *args, const void *text, void *ustack, void *k_stack);
extern int libc_stdio_get_console(void);

int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);

void lwp_setcwd(char *buf)
{
    if (strlen(buf) >= DFS_PATH_MAX)
    {
        rt_kprintf("buf too long!\n");
        return;
    }

#ifdef RT_USING_LWP
    struct rt_lwp *lwp;

    lwp = (struct rt_lwp *)rt_thread_self()->lwp;
    if (lwp)
        rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
    else
        rt_strncpy(working_directory, buf, DFS_PATH_MAX);
#else
#ifdef DFS_USING_WORKDIR
    rt_strncpy(working_directory, buf, DFS_PATH_MAX);
#endif
#endif

    return;
}

char *lwp_getcwd(void)
{
    char *dir_buf = RT_NULL;

#ifdef RT_USING_LWP
    struct rt_lwp *lwp;

    lwp = (struct rt_lwp *)rt_thread_self()->lwp;
    if (lwp)
    {
        if (lwp->working_directory[0] != '/')
        {
            dir_buf = &working_directory[0];
        }
        else
        {
            dir_buf = &lwp->working_directory[0];
        }
    }
    else
        dir_buf = &working_directory[0];
#else
#ifdef DFS_USING_WORKDIR
    dir_buf = &working_directory[0];
#endif
#endif

    return dir_buf;
}
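
/*
 * Usage sketch (illustrative, not part of the original file): chdir()-style
 * handling built on the two helpers above. When the caller runs inside an
 * lwp the path is kept per-process in lwp->working_directory; otherwise the
 * global working_directory is used.
 *
 *     lwp_setcwd("/mnt/sdcard");     -- set the cwd for the calling context
 *     char *cwd = lwp_getcwd();      -- returns a pointer into the cwd buffer,
 *                                       do not free it
 */
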
/**
 * RT-Thread light-weight process
 */
void lwp_set_kernel_sp(uint32_t *sp)
{
    rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
}

uint32_t *lwp_get_kernel_sp(void)
{
#ifdef RT_USING_USERSPACE
    return (uint32_t *)rt_thread_self()->sp;
#else
    return (uint32_t *)rt_thread_self()->kernel_sp;
#endif
}

#ifdef RT_USING_USERSPACE
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;
    char *str_k;
    char **new_argve;
    int i;
    int len;
    size_t *args_k;
    struct process_aux *aux;

    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);

    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }

    /* for aux */
    size += sizeof(struct process_aux);

    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }

    /* args = (int *)lwp_map_user(lwp, 0, size); */
    args = (int *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE), size, 0);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    args_k = (size_t *)rt_hw_mmu_v2p(&lwp->mmu_info, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);

    /* argc, argv[], 0, envp[], 0, aux[] */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    new_argve = (char **)&args_k[1];
    args_k[0] = argc;

    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;
    i++;

    new_argve[i] = 0;
    if (envp)
    {
        int j;

        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;
    }
    i++;

    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);

    lwp->args = args;

    return aux;
}
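
/*
 * Note (added for clarity, not in the original file): the argument block built
 * above lives in one user page at USER_VADDR_TOP - ARCH_PAGE_SIZE and, as the
 * copy loops imply, is laid out as
 *
 *     argc | argv[0..argc-1] | NULL | envp[0..] | NULL | aux items | NULL | string data
 *
 * new_argve[] holds the user-space pointers to the string copies, while the
 * kernel fills them in through args_k, the kernel-visible alias of the page.
 */
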
#else
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;
    struct process_aux *aux;

    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);

    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }

    /* for aux */
    size += sizeof(struct process_aux);

    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
    new_argve = (char **)&args[1];
    args[0] = argc;

    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;
    i++;

    new_argve[i] = 0;
    if (envp)
    {
        int j;

        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;
    }

    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    lwp->args = args;

    return aux;
}
#endif

#define check_off(voff, vlen)           \
    do                                  \
    {                                   \
        if (voff > vlen)                \
        {                               \
            result = -RT_ERROR;         \
            goto _exit;                 \
        }                               \
    } while (0)

#define check_read(vrlen, vrlen_want)   \
    do                                  \
    {                                   \
        if (vrlen < vrlen_want)         \
        {                               \
            result = -RT_ERROR;         \
            goto _exit;                 \
        }                               \
    } while (0)
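
/*
 * Note (added for clarity, not in the original file): both macros assume a
 * local "result" variable and an "_exit" label in the enclosing function
 * (see load_elf() below); an out-of-range offset or a short read aborts the
 * load and jumps to the common cleanup path.
 */
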
static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
{
    size_t read_block = 0;

    while (nmemb)
    {
        size_t count;

        count = read(fd, ptr, size * nmemb) / size;
        if (count < nmemb)
        {
            LOG_E("ERROR: file size error!");
            break;
        }

        ptr = (void *)((uint8_t *)ptr + (count * size));
        nmemb -= count;
        read_block += count;
    }

    return read_block;
}

typedef struct
{
    Elf_Word st_name;
    Elf_Addr st_value;
    Elf_Word st_size;
    unsigned char st_info;
    unsigned char st_other;
    Elf_Half st_shndx;
} Elf_sym;

#ifdef RT_USING_USERSPACE
void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
#else
void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
#endif

#ifdef RT_USING_USERSPACE
struct map_range
{
    void *start;
    size_t size;
};

static void expand_map_range(struct map_range *m, void *start, size_t size)
{
    if (!m->start)
    {
        m->start = start;
        m->size = size;
    }
    else
    {
        void *end = (void *)((char *)start + size);
        void *mend = (void *)((char *)m->start + m->size);

        if (m->start > start)
        {
            m->start = start;
        }
        if (mend < end)
        {
            mend = end;
        }
        m->size = (char *)mend - (char *)m->start;
    }
}

static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
{
    void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
    void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
    void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
    void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);

    if (m1->size)
    {
        if (m1_start < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m1_start > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
        if (m1_end < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m1_end > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
    }
    if (m2->size)
    {
        if (m2_start < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m2_start > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
        if (m2_end < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m2_end > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
    }
    if ((m1->size != 0) && (m2->size != 0))
    {
        if (m1_start < m2_start)
        {
            if (m1_end > m2_start)
            {
                return -1;
            }
        }
        else /* m2_start <= m1_start */
        {
            if (m2_end > m1_start)
            {
                return -1;
            }
        }
    }
    return 0;
}
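
/*
 * Note (added for clarity, not in the original file): after rounding both
 * ranges out to page boundaries, each non-empty range must lie within
 * [USER_LOAD_VADDR, USER_STACK_VSTART] and the two ranges must not overlap;
 * any violation makes load_elf() reject the image.
 */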
#endif

static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef RT_USING_USERSPACE
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;
#endif
#ifdef RT_USING_USERSPACE
    rt_mmu_info *m_info = &lwp->mmu_info;
#endif

    if (len < sizeof eheader)
    {
        return -RT_ERROR;
    }

    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);

    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        return -RT_ERROR;
    }

    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);

#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        return -RT_ERROR;
    }
#endif

    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        return -RT_ERROR;
    }

    if ((eheader.e_type != ET_DYN)
#ifdef RT_USING_USERSPACE
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        return -RT_ERROR;
    }

#ifdef RT_USING_USERSPACE
    {
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);
            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif

    if (eheader.e_entry != 0)
    {
        if ((eheader.e_entry != USER_LOAD_VADDR)
                && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalid */
            return -RT_ERROR;
        }
    }

    { /* load aux */
        uint8_t *process_header;
        size_t process_header_size;

        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;

#ifdef RT_USING_USERSPACE
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            return -RT_ERROR;
        }

        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            return -RT_ERROR;
        }

        pa = rt_hw_mmu_v2p(m_info, va);
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            return -RT_ERROR;
        }
#endif

        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);

#ifdef RT_USING_USERSPACE
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif

        aux->item[1].key = AT_PAGESZ;
#ifdef RT_USING_USERSPACE
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif

        aux->item[2].key = AT_RANDOM;
        {
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef RT_USING_USERSPACE
            uint8_t *krandom;

            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)rt_hw_mmu_v2p(m_info, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }

        aux->item[3].key = AT_PHDR;
#ifdef RT_USING_USERSPACE
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif

        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;

#ifdef RT_USING_USERSPACE
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }

    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef RT_USING_USERSPACE
    else
    {
        /* map user */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }

        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }

        if (user_area[0].start == NULL)
        {
            /* DYN */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char *)user_area[0].start + load_off);
            user_area[1].start = (void *)((char *)user_area[1].start + load_off);
        }

        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }

        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (int)(i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;

        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }

        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
  668. LOG_E("alloc text memory faild!");
  669. result = -RT_ENOMEM;
  670. goto _exit;
  671. }
  672. else
  673. {
  674. LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
  675. }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);

    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);

        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                return -RT_ERROR;
            }

            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef RT_USING_USERSPACE
            {
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;

                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                while (size)
                {
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void *)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);

            if (pheader.p_filesz < pheader.p_memsz)
            {
#ifdef RT_USING_USERSPACE
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;

                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }

    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;

        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);

        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }

        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);

        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }

        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef RT_USING_USERSPACE
        lwp_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        lwp_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);

        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);

_exit:
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
  823. LOG_E("lwp load faild, %d", result);
    }

    return result;
}
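
/*
 * Note (added for clarity, not in the original file): load_elf() returns
 * RT_EOK when the image is fully loaded (and relocated for ET_DYN), 1 when
 * the ELF carries a PT_DYNAMIC segment so the caller must hand it to ld.so
 * instead, and a negative error code (-RT_ERROR, -RT_ENOMEM/-ENOMEM) on any
 * failure.
 */
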
int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux);
RT_WEAK int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
{
    uint8_t *ptr;
    int ret = -1;
    int len;
    int fd = -1;

    /* check file name */
    RT_ASSERT(filename != RT_NULL);
    /* check lwp control block */
    RT_ASSERT(lwp != RT_NULL);

    /* copy file name to process name */
    rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);

    if (load_addr != RT_NULL)
    {
        lwp->lwp_type = LWP_TYPE_FIX_ADDR;
        ptr = load_addr;
    }
    else
    {
        lwp->lwp_type = LWP_TYPE_DYN_ADDR;
        ptr = RT_NULL;
    }

    fd = open(filename, O_BINARY | O_RDONLY, 0);
    if (fd < 0)
    {
        LOG_E("ERROR: Can't open elf file %s!", filename);
        goto out;
    }

    len = lseek(fd, 0, SEEK_END);
    if (len < 0)
    {
        LOG_E("ERROR: File %s size error!", filename);
        goto out;
    }

    lseek(fd, 0, SEEK_SET);

    ret = load_elf(fd, len, lwp, ptr, aux);
    if ((ret != RT_EOK) && (ret != 1))
    {
        LOG_E("lwp load ret = %d", ret);
    }

out:
    if (fd > 0)
    {
        close(fd);
    }

    return ret;
}

void lwp_cleanup(struct rt_thread *tid)
{
    rt_base_t level;
    struct rt_lwp *lwp;

    if (tid == NULL)
    {
        return;
    }

    LOG_I("cleanup thread: %s, stack_addr: %08X", tid->name, tid->stack_addr);

#ifndef RT_USING_USERSPACE
    if (tid->user_stack != RT_NULL)
    {
        rt_free(tid->user_stack);
    }
#endif

    level = rt_hw_interrupt_disable();
    lwp = (struct rt_lwp *)tid->lwp;
    lwp_tid_put(tid->tid);
    rt_list_remove(&tid->sibling);
    lwp_ref_dec(lwp);
    rt_hw_interrupt_enable(level);

    return;
}

static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
{
    struct dfs_fd *d;
    struct dfs_fdtable *lwp_fdt;

    lwp_fdt = &lwp->fdt;
    /* init 4 fds */
    lwp_fdt->fds = rt_calloc(4, sizeof(void *));
    if (lwp_fdt->fds)
    {
        lwp_fdt->maxfd = 4;
        d = fd_get(0);
        fd_associate(lwp_fdt, 0, d);
        d = fd_get(1);
        fd_associate(lwp_fdt, 1, d);
        d = fd_get(2);
        fd_associate(lwp_fdt, 2, d);
    }

    return;
}

static void lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

#ifdef RT_USING_GDBSERVER
    if (lwp->debug)
    {
        lwp->bak_first_ins = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = INS_BREAK_CONNECT;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }
#endif

    lwp_user_entry(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
}

struct rt_lwp *lwp_self(void)
{
    rt_thread_t tid;

    tid = rt_thread_self();
    if (tid)
    {
        return (struct rt_lwp *)tid->lwp;
    }

    return RT_NULL;
}

#ifdef RT_USING_GDBSERVER
pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
#else
pid_t lwp_execve(char *filename, int argc, char **argv, char **envp)
#endif
{
    int result;
    rt_base_t level;
    struct rt_lwp *lwp;
    char *thread_name;
    char *argv_last = argv[argc - 1];
    int bg = 0;
    struct process_aux *aux;
    int tid = 0;

    if (filename == RT_NULL)
    {
        return -RT_ERROR;
    }

    lwp = lwp_new();
    if (lwp == RT_NULL)
    {
        dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
        return -RT_ENOMEM;
    }
    LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));

    if ((tid = lwp_tid_get()) == 0)
    {
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }

#ifdef RT_USING_USERSPACE
    if (lwp_user_space_init(lwp) != 0)
    {
        lwp_tid_put(tid);
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }
#endif

    if (argv_last[0] == '&' && argv_last[1] == '\0')
    {
        argc--;
        bg = 1;
    }

    if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
    {
        lwp_tid_put(tid);
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }

    result = lwp_load(filename, lwp, RT_NULL, 0, aux);
    if (result == 1)
    {
        /* dynamic */
        lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
        result = load_ldso(lwp, filename, argv, envp);
    }

    if (result == RT_EOK)
    {
        rt_thread_t thread = RT_NULL;

        lwp_copy_stdio_fdt(lwp);

        /* obtain the base name */
        thread_name = strrchr(filename, '/');
        thread_name = thread_name ? thread_name + 1 : filename;
        thread = rt_thread_create(thread_name, lwp_thread_entry, RT_NULL,
                LWP_TASK_STACK_SIZE, 25, 200);
        if (thread != RT_NULL)
        {
            struct rt_lwp *self_lwp;

            thread->tid = tid;
            lwp_tid_set_thread(tid, thread);
            LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_uint32_t)thread->stack_addr,
                    (rt_uint32_t)thread->stack_addr + thread->stack_size);
            level = rt_hw_interrupt_disable();
            self_lwp = lwp_self();
            if (self_lwp)
            {
                /* lwp add to children link */
                lwp->sibling = self_lwp->first_child;
                self_lwp->first_child = lwp;
                lwp->parent = self_lwp;
            }
            thread->lwp = lwp;
            rt_list_insert_after(&lwp->t_grp, &thread->sibling);
#ifdef RT_USING_GDBSERVER
            if (debug)
            {
                lwp->debug = debug;
            }
#endif
            if ((rt_console_get_foreground() == self_lwp) && !bg)
            {
                rt_console_set_foreground(lwp);
            }
            rt_hw_interrupt_enable(level);
            rt_thread_startup(thread);
            return lwp_to_pid(lwp);
        }
    }

    lwp_tid_put(tid);
    lwp_ref_dec(lwp);

    return -RT_ERROR;
}

#ifdef RT_USING_GDBSERVER
pid_t exec(char *filename, int debug, int argc, char **argv)
{
    return lwp_execve(filename, debug, argc, argv, 0);
}
#else
pid_t exec(char *filename, int argc, char **argv)
{
    return lwp_execve(filename, argc, argv, 0);
}
#endif
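
/*
 * Usage sketch (illustrative, not part of the original file): launching a
 * user program from kernel or shell code. A trailing "&" argument, if
 * present, is stripped by lwp_execve() and starts the process in the
 * background.
 *
 *     char *argv[] = { "/bin/hello", "world" };
 *     pid_t pid = exec("/bin/hello", 2, argv);        -- built without RT_USING_GDBSERVER
 *     pid_t pid = exec("/bin/hello", 0, 2, argv);     -- built with RT_USING_GDBSERVER, debug off
 *
 * On success the new process id (from lwp_to_pid()) is returned; negative
 * values indicate errors.
 */
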
void lwp_user_setting_save(rt_thread_t thread)
{
    if (thread)
    {
        thread->thread_idr = rt_cpu_get_thread_idr();
    }
}

void lwp_user_setting_restore(rt_thread_t thread)
{
    if (!thread)
    {
        return;
    }

    rt_cpu_set_thread_idr(thread->thread_idr);

#ifdef RT_USING_GDBSERVER
    {
        struct rt_lwp *l = (struct rt_lwp *)thread->lwp;

        set_process_id((uint32_t)(size_t)l);
        if (l && l->debug)
        {
            uint32_t step_type = 0;

            step_type = gdb_get_step_type();

            if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
            {
                arch_activate_step();
            }
            else
            {
                arch_deactivate_step();
            }
        }
    }
#endif
}