lwp.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-12 Bernard first version
* 2018-11-02 heyuanjie fix compile error in iar
  10. * 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
  11. */
  12. #include <rthw.h>
  13. #include <rtthread.h>
  14. #include <dfs_posix.h>
  15. #include <lwp_elf.h>
  16. #include <lwp_console.h>
  17. #ifndef RT_USING_DFS
  18. #error "lwp need file system(RT_USING_DFS)"
  19. #endif
  20. #include "lwp.h"
  21. #include "lwp_arch.h"
  22. #define DBG_TAG "LWP"
  23. #define DBG_LVL DBG_WARNING
  24. #include <rtdbg.h>
  25. #ifdef RT_USING_USERSPACE
  26. #ifdef RT_USING_GDBSERVER
  27. #include <hw_breakpoint.h>
  28. #include <lwp_gdbserver.h>
  29. #endif
  30. #include <lwp_mm_area.h>
  31. #include <lwp_user_mm.h>
  32. #endif
/* ELF identification magic: the first four bytes of every valid ELF image */
static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};

/* arch-specific: drop to user mode and jump to the process entry point */
extern void lwp_user_entry(void *args, const void *text, void *ustack, void *k_stack);
extern int libc_stdio_get_console(void);

/* loads the dynamic linker for dynamically linked executables (defined elsewhere) */
int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);

/**
 * RT-Thread light-weight process
 */
  40. void lwp_set_kernel_sp(uint32_t *sp)
  41. {
  42. rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
  43. }
/**
 * Get the kernel-mode stack pointer of the current thread.
 *
 * @return with RT_USING_USERSPACE the thread's saved sp field is returned
 *         (presumably the kernel context is kept there in that build --
 *         confirm against the arch context-switch code); otherwise the
 *         dedicated kernel_sp field is used
 */
uint32_t *lwp_get_kernel_sp(void)
{
#ifdef RT_USING_USERSPACE
    return (uint32_t *)rt_thread_self()->sp;
#else
    return (uint32_t *)rt_thread_self()->kernel_sp;
#endif
}
  52. #ifdef RT_USING_USERSPACE
/**
 * Copy argc/argv/envp and the ELF auxiliary vector into the user address
 * space of a new process (RT_USING_USERSPACE version).
 *
 * One page just below USER_VADDR_TOP is mapped into the process and laid
 * out as: argc, argv[] (NULL terminated), envp[] (NULL terminated), the
 * aux vector, then the string bytes the pointer arrays reference.  All
 * writes go through the kernel alias of the page (user va -> pa -> kernel
 * va), while the pointers stored in the arrays are user addresses.
 *
 * @param lwp  target process
 * @param argc number of command line arguments
 * @param argv argument strings (kernel pointers)
 * @param envp environment strings (kernel pointers), may be RT_NULL
 *
 * @return kernel-accessible pointer to the aux vector, or RT_NULL when the
 *         arguments do not fit in one page or the mapping fails
 */
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;
    char *str_k;
    char **new_argve;
    int i;
    int len;
    size_t *args_k;
    struct process_aux *aux;

    /* account for the argument strings and the argv pointer array */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);

    /* account for the environment strings and the envp pointer array;
     * i is left holding the environment entry count for the layout below */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }

    /* for aux */
    size += sizeof(struct process_aux);

    /* everything must fit into the single page mapped below */
    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }

    /* args = (int *)lwp_map_user(lwp, 0, size); */
    args = (int *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE), size, 0);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* kernel-visible alias of the user page: user va -> pa -> kernel va */
    args_k = (size_t *)rt_hw_mmu_v2p(&lwp->mmu_info, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);

    /* argc, argv[], 0, envp[], 0 , aux[] */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));

    new_argve = (char **)&args_k[1];
    args_k[0] = argc;

    /* copy argument strings through the kernel alias; store user addresses */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;
    i++;

    /* envp array follows, also NULL terminated */
    new_argve[i] = 0;
    if (envp)
    {
        int j;

        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;
    }
    i++;

    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    /* flush so the new contents are visible through the user mapping */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);

    lwp->args = args;

    return aux;
}
  134. #else
/**
 * Copy argc/argv/envp and the ELF auxiliary vector into a kernel heap
 * buffer (no-MMU build: the process shares the kernel address space).
 *
 * Layout matches the USERSPACE variant: argc, argv[] (NULL terminated),
 * envp[] (NULL terminated), the aux vector, then the string data.
 *
 * @param lwp  target process (receives the buffer in lwp->args)
 * @param argc number of command line arguments
 * @param argv argument strings
 * @param envp environment strings, may be RT_NULL
 *
 * @return pointer to the aux vector inside the buffer, RT_NULL on OOM
 */
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;
    struct process_aux *aux;

    /* account for the argument strings and the argv pointer array */
    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);

    /* account for the environment strings and the envp pointer array;
     * i is left holding the environment entry count */
    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }

    /* for aux */
    size += sizeof(struct process_aux);

    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));

    new_argve = (char **)&args[1];
    args[0] = argc;

    /* copy the argument strings and fill the argv pointer array */
    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        rt_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;
    i++;

    /* envp array follows, also NULL terminated */
    new_argve[i] = 0;
    if (envp)
    {
        int j;

        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            rt_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;
    }

    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    lwp->args = args;

    return aux;
}
  202. #endif
/*
 * Fail out of the enclosing loader function when a file offset lies beyond
 * the file length.  Both macros expect a local `int result` variable and an
 * `_exit` cleanup label to be in scope at the expansion site.
 */
#define check_off(voff, vlen)   \
    do                          \
    {                           \
        if (voff > vlen)        \
        {                       \
            result = -RT_ERROR; \
            goto _exit;         \
        }                       \
    } while (0)

/* Fail out when a read returned fewer bytes than were requested. */
#define check_read(vrlen, vrlen_want) \
    do                                \
    {                                 \
        if (vrlen < vrlen_want)       \
        {                             \
            result = -RT_ERROR;       \
            goto _exit;               \
        }                             \
    } while (0)
  221. static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
  222. {
  223. size_t read_block = 0;
  224. while (nmemb)
  225. {
  226. size_t count;
  227. count = read(fd, ptr, size * nmemb) / size;
  228. if (count < nmemb)
  229. {
  230. LOG_E("ERROR: file size error!");
  231. break;
  232. }
  233. ptr = (void *)((uint8_t *)ptr + (count * size));
  234. nmemb -= count;
  235. read_block += count;
  236. }
  237. return read_block;
  238. }
/* Symbol table entry as consumed by the relocator when reading .dynsym
 * (field order as used by this loader; widths come from lwp_elf.h). */
typedef struct
{
    Elf_Word st_name;      /* string table offset of the symbol name */
    Elf_Addr st_value;     /* symbol value (address) */
    Elf_Word st_size;      /* size of the associated object, in bytes */
    unsigned char st_info; /* type and binding attributes */
    unsigned char st_other;
    Elf_Half st_shndx;     /* index of the section the symbol belongs to */
} Elf_sym;

/* arch-specific relocation of .rel.dyn/.got entries; implemented elsewhere */
#ifdef RT_USING_USERSPACE
void lwp_elf_reloc(rt_mmu_info *m_info, void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
#else
void lwp_elf_reloc(void *text_start, void *rel_dyn_start, size_t rel_dyn_size, void *got_start, size_t got_size, Elf_sym *dynsym);
#endif
  253. #ifdef RT_USING_USERSPACE
  254. struct map_range
  255. {
  256. void *start;
  257. size_t size;
  258. };
  259. static void expand_map_range(struct map_range *m, void *start, size_t size)
  260. {
  261. if (!m->start)
  262. {
  263. m->start = start;
  264. m->size = size;
  265. }
  266. else
  267. {
  268. void *end = (void *)((char*)start + size);
  269. void *mend = (void *)((char*)m->start + m->size);
  270. if (m->start > start)
  271. {
  272. m->start = start;
  273. }
  274. if (mend < end)
  275. {
  276. mend = end;
  277. }
  278. m->size = (char *)mend - (char *)m->start;
  279. }
  280. }
/*
 * Validate the page-rounded extents of the text (m1) and data (m2) ranges:
 * every non-empty range must lie inside [USER_LOAD_VADDR, USER_STACK_VSTART]
 * and the two page-rounded ranges must not overlap each other.
 *
 * (NOTE(review): "ckeck" is a pre-existing typo in the name, kept because
 * callers in this file reference it.)
 *
 * Returns 0 when the layout is acceptable, -1 otherwise.
 */
static int map_range_ckeck(struct map_range *m1, struct map_range *m2)
{
    /* round each start down and each end up to a full page boundary */
    void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
    void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
    void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
    void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);

    /* text must fit inside the user image window */
    if (m1->size)
    {
        if (m1_start < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m1_start > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
        if (m1_end < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m1_end > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
    }

    /* data must fit inside the user image window */
    if (m2->size)
    {
        if (m2_start < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m2_start > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
        if (m2_end < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m2_end > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
    }

    /* the two ranges must not overlap (compared page-rounded) */
    if ((m1->size != 0) && (m2->size != 0))
    {
        if (m1_start < m2_start)
        {
            if (m1_end > m2_start)
            {
                return -1;
            }
        }
        else /* m2_start <= m1_start */
        {
            if (m2_end > m1_start)
            {
                return -1;
            }
        }
    }
    return 0;
}
  344. #endif
/**
 * Load an ELF image from an open file into a light-weight process.
 *
 * Validates the ELF header, maps (RT_USING_USERSPACE) or heap-allocates
 * (no-MMU) the text/data areas, copies in the loadable segments, zero
 * fills the BSS tail, fills the process auxiliary vector and, for ET_DYN
 * images, applies relocations.
 *
 * @param fd        open descriptor of the ELF file
 * @param len       total file length in bytes
 * @param lwp       target process control block
 * @param load_addr fixed load address, or RT_NULL to let the loader choose
 * @param aux       process auxiliary vector to fill in
 *
 * @return RT_EOK on success; 1 when the image is dynamically linked and
 *         the caller must load ld.so instead; negative error on failure
 */
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef RT_USING_USERSPACE
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;
#endif
#ifdef RT_USING_USERSPACE
    rt_mmu_info *m_info = &lwp->mmu_info;
#endif

    /* the file must at least hold a complete ELF header */
    if (len < sizeof eheader)
    {
        return -RT_ERROR;
    }

    /* check the ELF magic number */
    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);
    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        return -RT_ERROR;
    }

    /* read the full ELF header */
    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);

    /* EI_CLASS must match this build (1 = 32-bit, 2 = 64-bit) */
#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        return -RT_ERROR;
    }
#endif
    /* EI_VERSION must be the current ELF version */
    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        return -RT_ERROR;
    }
    if ((eheader.e_type != ET_DYN)
#ifdef RT_USING_USERSPACE
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        return -RT_ERROR;
    }

#ifdef RT_USING_USERSPACE
    {
        /* a PT_DYNAMIC segment means the image needs the dynamic linker;
         * report that back so the caller loads ld.so instead */
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);
            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif

    /* a non-zero entry must be one of the two fixed, known entry points */
    if (eheader.e_entry != 0)
    {
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalid */
            return -RT_ERROR;
        }
    }

    { /* load aux */
        uint8_t *process_header;
        size_t process_header_size;

        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef RT_USING_USERSPACE
        /* program headers plus the 16-byte AT_RANDOM area must fit in a page */
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            return -RT_ERROR;
        }
        /* write through the kernel alias of the freshly mapped page */
        pa = rt_hw_mmu_v2p(m_info, va);
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef RT_USING_USERSPACE
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif

        aux->item[1].key = AT_PAGESZ;
#ifdef RT_USING_USERSPACE
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif

        /* AT_RANDOM: 16 bytes of (weak, tick-derived) randomness for libc */
        aux->item[2].key = AT_RANDOM;
        {
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef RT_USING_USERSPACE
            uint8_t *krandom;

            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));
            krandom = (uint8_t *)rt_hw_mmu_v2p(m_info, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }

        aux->item[3].key = AT_PHDR;
#ifdef RT_USING_USERSPACE
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef RT_USING_USERSPACE
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }

    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef RT_USING_USERSPACE
    else
    {
        /* map user: scan SHF_ALLOC sections and accumulate the text range
         * (read-only PROGBITS) and the data range (everything else) */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }
        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }
        if (user_area[0].start == NULL)
        {
            /* DYN: rebase a position-independent image to the fixed load address */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char*)user_area[0].start + load_off);
            user_area[1].start = (void *)((char*)user_area[1].start + load_off);
        }
        if (map_range_ckeck(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }
        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                /* last argument marks the text area (i == 0) */
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (int)(i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        /* no MMU: compute the image extent over all SHF_ALLOC sections and
         * allocate one contiguous block from the heap */
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;

        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }
            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }
        total_size = end - start;
#ifdef RT_USING_CACHE
        /* cache-line aligned so flush/invalidate below works cleanly */
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory faild!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }
        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif
    lwp->text_entry = (void *)(eheader.e_entry + load_off);

    /* copy each PT_LOAD segment into place and zero-fill its BSS tail */
    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);
        if (pheader.p_type == PT_LOAD)
        {
            /* a segment can never have more file bytes than memory bytes */
            if (pheader.p_filesz > pheader.p_memsz)
            {
                return -RT_ERROR;
            }

            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef RT_USING_USERSPACE
            {
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;

                /* copy page by page through the kernel alias of each page */
                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                while (size)
                {
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void*)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);

            if (pheader.p_filesz < pheader.p_memsz)
            {
#ifdef RT_USING_USERSPACE
                /* zero the [filesz, memsz) tail (BSS), page by page */
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off; /* NOTE: intentionally shadows the outer `off` */

                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = rt_hw_mmu_v2p(m_info, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }

    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);
        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }
        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);

        /* locate .got, .rel.dyn and .dynsym by section name */
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);
            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }
        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef RT_USING_USERSPACE
        lwp_elf_reloc(m_info, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        lwp_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);

        /* text was written through the data cache; sync it with the icache */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }
    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);

_exit:
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load faild, %d", result);
    }
    return result;
}
  776. int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux);
  777. RT_WEAK int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
  778. {
  779. uint8_t *ptr;
  780. int ret = -1;
  781. int len;
  782. int fd = -1;
  783. /* check file name */
  784. RT_ASSERT(filename != RT_NULL);
  785. /* check lwp control block */
  786. RT_ASSERT(lwp != RT_NULL);
  787. /* copy file name to process name */
  788. rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
  789. if (load_addr != RT_NULL)
  790. {
  791. lwp->lwp_type = LWP_TYPE_FIX_ADDR;
  792. ptr = load_addr;
  793. }
  794. else
  795. {
  796. lwp->lwp_type = LWP_TYPE_DYN_ADDR;
  797. ptr = RT_NULL;
  798. }
  799. fd = open(filename, O_BINARY | O_RDONLY, 0);
  800. if (fd < 0)
  801. {
  802. LOG_E("ERROR: Can't open elf file %s!", filename);
  803. goto out;
  804. }
  805. len = lseek(fd, 0, SEEK_END);
  806. if (len < 0)
  807. {
  808. LOG_E("ERROR: File %s size error!", filename);
  809. goto out;
  810. }
  811. lseek(fd, 0, SEEK_SET);
  812. ret = load_elf(fd, len, lwp, ptr, aux);
  813. if ((ret != RT_EOK) && (ret != 1))
  814. {
  815. LOG_E("lwp load ret = %d", ret);
  816. }
  817. out:
  818. if (fd > 0)
  819. {
  820. close(fd);
  821. }
  822. return ret;
  823. }
/**
 * Thread cleanup callback: detach an exiting thread from its process.
 *
 * Installed as tid->cleanup in lwp_thread_entry(); releases the thread id,
 * unlinks the thread from the process thread group and drops one reference
 * on the owning process (which frees it when the count hits zero).
 *
 * @param tid the exiting thread, may be NULL (then nothing is done)
 */
void lwp_cleanup(struct rt_thread *tid)
{
    rt_base_t level;
    struct rt_lwp *lwp;

    if (tid == NULL)
    {
        return;
    }
    LOG_I("cleanup thread: %s, stack_addr: %08X", tid->name, tid->stack_addr);

#ifndef RT_USING_USERSPACE
    /* without an MMU the per-thread user stack was heap allocated; free it */
    if (tid->user_stack != RT_NULL)
    {
        rt_free(tid->user_stack);
    }
#endif

    /* unlink under interrupt lock so list and refcount stay consistent */
    level = rt_hw_interrupt_disable();
    lwp = (struct rt_lwp *)tid->lwp;
    lwp_tid_put(tid->tid);
    rt_list_remove(&tid->sibling);
    lwp_ref_dec(lwp);
    rt_hw_interrupt_enable(level);

    return;
}
  847. static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
  848. {
  849. struct dfs_fd *d;
  850. struct dfs_fdtable *lwp_fdt;
  851. lwp_fdt = &lwp->fdt;
  852. /* init 4 fds */
  853. lwp_fdt->fds = rt_calloc(4, sizeof(void *));
  854. if (lwp_fdt->fds)
  855. {
  856. lwp_fdt->maxfd = 4;
  857. d = fd_get(0);
  858. fd_associate(lwp_fdt, 0, d);
  859. d = fd_get(1);
  860. fd_associate(lwp_fdt, 1, d);
  861. d = fd_get(2);
  862. fd_associate(lwp_fdt, 2, d);
  863. }
  864. return;
  865. }
/* First code run by a new process's main (kernel) thread: install the
 * cleanup hook, optionally arm the gdb entry breakpoint, then drop to
 * user mode at the process entry point. */
static void lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

#ifdef RT_USING_GDBSERVER
    if (lwp->debug)
    {
        /* patch a break instruction over the first instruction so the
         * process traps into the gdb stub immediately; the original
         * instruction is saved for restoration by the stub */
        lwp->bak_first_ins = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = INS_BREAK_CONNECT;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }
#endif
    /* never returns: switches to user mode at the process entry */
    lwp_user_entry(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, tid->stack_addr + tid->stack_size);
}
  885. struct rt_lwp *lwp_self(void)
  886. {
  887. rt_thread_t tid;
  888. tid = rt_thread_self();
  889. if (tid)
  890. {
  891. return (struct rt_lwp *)tid->lwp;
  892. }
  893. return RT_NULL;
  894. }
  895. #ifdef RT_USING_GDBSERVER
  896. pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
  897. #else
  898. pid_t lwp_execve(char *filename, int argc, char **argv, char **envp)
  899. #endif
  900. {
  901. int result;
  902. rt_base_t level;
  903. struct rt_lwp *lwp;
  904. char *thread_name;
  905. char *argv_last = argv[argc - 1];
  906. int bg = 0;
  907. struct process_aux *aux;
  908. int tid = 0;
  909. if (filename == RT_NULL)
  910. {
  911. return -RT_ERROR;
  912. }
  913. lwp = lwp_new();
  914. if (lwp == RT_NULL)
  915. {
  916. dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
  917. return -RT_ENOMEM;
  918. }
  919. LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));
  920. if ((tid = lwp_tid_get()) == 0)
  921. {
  922. lwp_ref_dec(lwp);
  923. return -ENOMEM;
  924. }
  925. #ifdef RT_USING_USERSPACE
  926. if (lwp_user_space_init(lwp) != 0)
  927. {
  928. lwp_tid_put(tid);
  929. lwp_ref_dec(lwp);
  930. return -ENOMEM;
  931. }
  932. #endif
  933. if (argv_last[0] == '&' && argv_last[1] == '\0')
  934. {
  935. argc--;
  936. bg = 1;
  937. }
  938. if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
  939. {
  940. lwp_tid_put(tid);
  941. lwp_ref_dec(lwp);
  942. return -ENOMEM;
  943. }
  944. result = lwp_load(filename, lwp, RT_NULL, 0, aux);
  945. if (result == 1)
  946. {
  947. /* dynmaic */
  948. lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
  949. result = load_ldso(lwp, filename, argv, envp);
  950. }
  951. if (result == RT_EOK)
  952. {
  953. rt_thread_t thread = RT_NULL;
  954. lwp_copy_stdio_fdt(lwp);
  955. /* obtain the base name */
  956. thread_name = strrchr(filename, '/');
  957. thread_name = thread_name ? thread_name + 1 : filename;
  958. thread = rt_thread_create(thread_name, lwp_thread_entry, RT_NULL,
  959. LWP_TASK_STACK_SIZE, 25, 200);
  960. if (thread != RT_NULL)
  961. {
  962. struct rt_lwp *self_lwp;
  963. thread->tid = tid;
  964. lwp_tid_set_thread(tid, thread);
  965. LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_uint32_t)thread->stack_addr,
  966. (rt_uint32_t)thread->stack_addr + thread->stack_size);
  967. level = rt_hw_interrupt_disable();
  968. self_lwp = lwp_self();
  969. if (self_lwp)
  970. {
  971. /* lwp add to children link */
  972. lwp->sibling = self_lwp->first_child;
  973. self_lwp->first_child = lwp;
  974. lwp->parent = self_lwp;
  975. }
  976. thread->lwp = lwp;
  977. rt_list_insert_after(&lwp->t_grp, &thread->sibling);
  978. #ifdef RT_USING_GDBSERVER
  979. if (debug)
  980. {
  981. lwp->debug = debug;
  982. }
  983. #endif
  984. if ((rt_console_get_foreground() == self_lwp) && !bg)
  985. {
  986. rt_console_set_foreground(lwp);
  987. }
  988. rt_hw_interrupt_enable(level);
  989. rt_thread_startup(thread);
  990. return lwp_to_pid(lwp);
  991. }
  992. }
  993. lwp_tid_put(tid);
  994. lwp_ref_dec(lwp);
  995. return -RT_ERROR;
  996. }
/* Shell-facing wrapper around lwp_execve() with an empty environment. */
#ifdef RT_USING_GDBSERVER
pid_t exec(char *filename, int debug, int argc, char **argv)
{
    return lwp_execve(filename, debug, argc, argv, 0);
}
#else
pid_t exec(char *filename, int argc, char **argv)
{
    return lwp_execve(filename, argc, argv, 0);
}
#endif
  1008. void lwp_user_setting_save(rt_thread_t thread)
  1009. {
  1010. if (thread)
  1011. {
  1012. thread->thread_idr = rt_cpu_get_thread_idr();
  1013. }
  1014. }
/**
 * Restore per-thread user-mode settings on context switch-in.
 *
 * Reloads the thread's TLS pointer register and, when gdb debugging is
 * active for the thread's process, re-arms or disarms hardware single
 * stepping according to the current step mode.
 *
 * @param thread thread being switched in, may be NULL
 */
void lwp_user_setting_restore(rt_thread_t thread)
{
    if (!thread)
    {
        return;
    }
    rt_cpu_set_thread_idr(thread->thread_idr);

#ifdef RT_USING_GDBSERVER
    {
        struct rt_lwp *l = (struct rt_lwp *)thread->lwp;

        set_process_id((uint32_t)(size_t)l);
        if (l && l->debug)
        {
            uint32_t step_type = 0;

            step_type = gdb_get_step_type();
            /* step_type 2: step every thread; 1: only threads marked step_exec */
            if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
            {
                arch_activate_step();
            }
            else
            {
                arch_deactivate_step();
            }
        }
    }
#endif
}