/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-12     Bernard      first version
 * 2018-11-02     heyuanjie    fix compile error in iar
 * 2021-02-03     lizhirui     add 64-bit arch support and riscv64 arch support
 * 2021-08-26     linzhenxing  add lwp_setcwd\lwp_getcwd
 * 2023-02-20     wangxiaoyao  inv icache before new app startup
 * 2023-02-20     wangxiaoyao  fix bug on foreground app switch
 * 2023-10-16     Shell        Support a new backtrace framework
 * 2023-11-17     xqyjlj       add process group and session support
 * 2023-11-30     Shell        add lwp_startup()
 */

#define DBG_TAG "lwp"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>

#include <rthw.h>
#include <rtthread.h>

#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */

#include <lwp_elf.h>

#ifndef RT_USING_DFS
#error "lwp need file system(RT_USING_DFS)"
#endif

#include "lwp_internal.h"
#include "lwp_arch.h"
#include "lwp_arch_comm.h"
#include "lwp_signal.h"
#include "lwp_dbg.h"

#include <terminal/terminal.h>

#ifdef ARCH_MM_MMU
#include <lwp_user_mm.h>
#endif /* end of ARCH_MM_MMU */

#ifndef O_DIRECTORY
#define O_DIRECTORY 0x200000
#endif

#ifndef O_BINARY
#define O_BINARY 0x10000
#endif

static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};

#ifdef DFS_USING_WORKDIR
extern char working_directory[];
#endif

/**
 * @brief The default console is only a backup device with the lowest priority.
 * It's always recommended to pick the console from the boot arguments,
 * and don't forget to register that device with a higher priority.
 */
static rt_err_t lwp_default_console_setup(void)
{
    rt_device_t bakdev = rt_device_find("ttyS0");
    rt_err_t rc;

    if (bakdev)
    {
        lwp_console_register_backend(bakdev, LWP_CONSOLE_LOWEST_PRIOR);
        rc = RT_EOK;
    }
    else
    {
        rc = -RT_EINVAL;
    }

    return rc;
}

static int lwp_component_init(void)
{
    int rc;
    if ((rc = lwp_tid_init()) != RT_EOK)
    {
        LOG_E("%s: lwp_tid_init() failed", __func__);
    }
    else if ((rc = lwp_pid_init()) != RT_EOK)
    {
        LOG_E("%s: lwp_pid_init() failed", __func__);
    }
    else if ((rc = rt_channel_component_init()) != RT_EOK)
    {
        LOG_E("%s: rt_channel_component_init failed", __func__);
    }
    else if ((rc = lwp_futex_init()) != RT_EOK)
    {
        LOG_E("%s: lwp_futex_init() failed", __func__);
    }
    else if ((rc = lwp_default_console_setup()) != RT_EOK)
    {
        LOG_E("%s: lwp_default_console_setup() failed", __func__);
    }
    return rc;
}
INIT_COMPONENT_EXPORT(lwp_component_init);

rt_weak int lwp_startup_debug_request(void)
{
    return 0;
}

#define LATENCY_TIMES (3)
#define LATENCY_IN_MSEC (128)
#define LWP_CONSOLE_PATH "CONSOLE=/dev/console"
const char *init_search_path[] = {
    "/sbin/init",
    "/bin/init",
};

/**
 * Start up process 0 and do the essential work.
 * This is the "Hello World" point of RT-Smart.
 */
static int lwp_startup(void)
{
    int error;
    const char *init_path;
    char *argv[] = {0, "&"};
    char *envp[] = {LWP_CONSOLE_PATH, 0};

#ifdef LWP_DEBUG_INIT
    int command;
    int countdown = LATENCY_TIMES;
    while (countdown)
    {
        command = lwp_startup_debug_request();
        if (command)
        {
            return 0;
        }
        rt_kprintf("Press any key to stop init process startup ... %d\n", countdown);
        countdown -= 1;
        rt_thread_mdelay(LATENCY_IN_MSEC);
    }
    rt_kprintf("Starting init ...\n");
#endif /* LWP_DEBUG_INIT */

    for (size_t i = 0; i < sizeof(init_search_path)/sizeof(init_search_path[0]); i++)
    {
        struct stat s;
        init_path = init_search_path[i];
        error = stat(init_path, &s);
        if (error == 0)
        {
            argv[0] = (void *)init_path;
            error = lwp_execve((void *)init_path, 0, sizeof(argv)/sizeof(argv[0]), argv, envp);
            if (error < 0)
            {
                LOG_E("%s: failed to startup process 0 (init)\n"
                      "Switching to legacy mode...", __func__);
            }
            else if (error != 1)
            {
                LOG_E("%s: pid 1 is already allocated", __func__);
                error = -EBUSY;
            }
            else
            {
                rt_lwp_t p = lwp_from_pid_locked(1);
                p->sig_protected = 1;

                error = 0;
            }
            break;
        }
    }

    if (error)
    {
        LOG_D("%s: init program not found\n"
              "Switching to legacy mode...", __func__);
    }
    return error;
}
INIT_APP_EXPORT(lwp_startup);
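
/**
 * @brief Set the current working directory. If the calling thread belongs to
 * a user process (lwp), the path is stored in that process's control block;
 * otherwise the global working_directory is updated. Paths of DFS_PATH_MAX or
 * more characters are rejected.
 */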
void lwp_setcwd(char *buf)
{
    struct rt_lwp *lwp = RT_NULL;

    if (strlen(buf) >= DFS_PATH_MAX)
    {
        rt_kprintf("buf too long!\n");
        return;
    }

    lwp = (struct rt_lwp *)rt_thread_self()->lwp;
    if (lwp)
    {
        rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX - 1);
    }
    else
    {
        rt_strncpy(working_directory, buf, DFS_PATH_MAX - 1);
    }

    return;
}

char *lwp_getcwd(void)
{
    char *dir_buf = RT_NULL;
    struct rt_lwp *lwp = RT_NULL;
    rt_thread_t thread = rt_thread_self();

    if (thread)
    {
        lwp = (struct rt_lwp *)thread->lwp;
    }

    if (lwp)
    {
        if (lwp->working_directory[0] != '/')
        {
            dir_buf = &working_directory[0];
        }
        else
        {
            dir_buf = &lwp->working_directory[0];
        }
    }
    else
        dir_buf = &working_directory[0];

    return dir_buf;
}

/**
 * RT-Thread light-weight process
 */
void lwp_set_kernel_sp(uint32_t *sp)
{
    rt_thread_self()->kernel_sp = (rt_uint32_t *)sp;
}

uint32_t *lwp_get_kernel_sp(void)
{
#ifdef ARCH_MM_MMU
    return (uint32_t *)rt_thread_self()->sp;
#else
    uint32_t *kernel_sp;
    extern rt_uint32_t rt_interrupt_from_thread;
    extern rt_uint32_t rt_thread_switch_interrupt_flag;
    if (rt_thread_switch_interrupt_flag)
    {
        kernel_sp = (uint32_t *)((rt_thread_t)rt_container_of(rt_interrupt_from_thread, struct rt_thread, sp))->kernel_sp;
    }
    else
    {
        kernel_sp = (uint32_t *)rt_thread_self()->kernel_sp;
    }
    return kernel_sp;
#endif
}

#ifdef ARCH_MM_MMU
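/**
 * @brief Copy argc/argv/envp plus the ELF auxiliary vector into a page mapped
 * at the top of the new process's user stack (USER_STACK_VEND). Returns a
 * kernel-accessible pointer to the aux vector inside that page, or RT_NULL if
 * the arguments do not fit into one page or the mapping fails.
 */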
struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
    int size = sizeof(size_t) * 5; /* store argc, argv, envp, aux, NULL */
    int *args;
    char *str;
    char *str_k;
    char **new_argve;
    int i;
    int len;
    size_t *args_k;
    struct process_aux *aux;
    size_t prot = PROT_READ | PROT_WRITE;
    size_t flags = MAP_FIXED | MAP_PRIVATE;
    size_t zero = 0;

    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(size_t) * argc);

    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(size_t);
            i++;
        }
    }

    /* for aux */
    size += sizeof(struct process_aux);

    if (size > ARCH_PAGE_SIZE)
    {
        return RT_NULL;
    }

    args = lwp_mmap2(lwp, (void *)(USER_STACK_VEND), size, prot, flags, -1, 0);
    if (args == RT_NULL || lwp_data_put(lwp, args, &zero, sizeof(zero)) != sizeof(zero))
    {
        return RT_NULL;
    }

    args_k = (size_t *)lwp_v2p(lwp, args);
    args_k = (size_t *)((size_t)args_k - PV_OFFSET);

    /* argc, argv[], 0, envp[], 0 , aux[] */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));
    str_k = (char *)((size_t)args_k + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(size_t));

    new_argve = (char **)&args_k[1];
    args_k[0] = argc;

    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str_k, argv[i], len);
        str += len;
        str_k += len;
    }
    new_argve[i] = 0;
    i++;

    new_argve[i] = 0;
    if (envp)
    {
        int j;

        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str_k, envp[j], len);
            str += len;
            str_k += len;
            i++;
        }
        new_argve[i] = 0;
    }
    i++;

    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, args_k, size);

    lwp->args = args;

    return aux;
}
#else
static struct process_aux *lwp_argscopy(struct rt_lwp *lwp, int argc, char **argv, char **envp)
{
#ifdef ARCH_MM_MMU
    int size = sizeof(int) * 5; /* store argc, argv, envp, aux, NULL */
    struct process_aux *aux;
#else
    int size = sizeof(int) * 4; /* store argc, argv, envp, NULL */
#endif /* ARCH_MM_MMU */
    int *args;
    char *str;
    char **new_argve;
    int i;
    int len;

    for (i = 0; i < argc; i++)
    {
        size += (rt_strlen(argv[i]) + 1);
    }
    size += (sizeof(int) * argc);

    i = 0;
    if (envp)
    {
        while (envp[i] != 0)
        {
            size += (rt_strlen(envp[i]) + 1);
            size += sizeof(int);
            i++;
        }
    }

#ifdef ARCH_MM_MMU
    /* for aux */
    size += sizeof(struct process_aux);

    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }

    /* argc, argv[], 0, envp[], 0 */
    str = (char *)((size_t)args + (argc + 2 + i + 1 + AUX_ARRAY_ITEMS_NR * 2 + 1) * sizeof(int));
#else
    args = (int *)rt_malloc(size);
    if (args == RT_NULL)
    {
        return RT_NULL;
    }
    str = (char *)((int)args + (argc + 2 + i + 1) * sizeof(int));
#endif /* ARCH_MM_MMU */

    new_argve = (char **)&args[1];
    args[0] = argc;

    for (i = 0; i < argc; i++)
    {
        len = rt_strlen(argv[i]) + 1;
        new_argve[i] = str;
        lwp_memcpy(str, argv[i], len);
        str += len;
    }
    new_argve[i] = 0;
    i++;

    new_argve[i] = 0;
    if (envp)
    {
        int j;
        for (j = 0; envp[j] != 0; j++)
        {
            len = rt_strlen(envp[j]) + 1;
            new_argve[i] = str;
            lwp_memcpy(str, envp[j], len);
            str += len;
            i++;
        }
        new_argve[i] = 0;
    }

#ifdef ARCH_MM_MMU
    /* aux */
    aux = (struct process_aux *)(new_argve + i);
    aux->item[0].key = AT_EXECFN;
    aux->item[0].value = (uint32_t)(size_t)new_argve[0];
    i += AUX_ARRAY_ITEMS_NR * 2;
    new_argve[i] = 0;

    lwp->args = args;

    return aux;
#else
    lwp->args = args;
    lwp->args_length = size;

    return (struct process_aux *)(new_argve + i);
#endif /* ARCH_MM_MMU */
}
#endif

#ifdef ARCH_MM_MMU
#define check_off(voff, vlen)           \
    do                                  \
    {                                   \
        if (voff > vlen)                \
        {                               \
            result = -RT_ERROR;         \
            goto _exit;                 \
        }                               \
    } while (0)

#define check_read(vrlen, vrlen_want)   \
    do                                  \
    {                                   \
        if (vrlen < vrlen_want)         \
        {                               \
            result = -RT_ERROR;         \
            goto _exit;                 \
        }                               \
    } while (0)
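
/**
 * @brief fread()-like helper on a file descriptor: read nmemb items of the
 * given size and return the number of items actually read. Reading stops with
 * an error log when the file turns out to be shorter than requested.
 */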
static size_t load_fread(void *ptr, size_t size, size_t nmemb, int fd)
{
    size_t read_block = 0;

    while (nmemb)
    {
        size_t count;

        count = read(fd, ptr, size * nmemb) / size;
        if (count < nmemb)
        {
            LOG_E("ERROR: file size error!");
            break;
        }

        ptr = (void *)((uint8_t *)ptr + (count * size));
        nmemb -= count;
        read_block += count;
    }

    return read_block;
}

typedef struct
{
    Elf_Word st_name;
    Elf_Addr st_value;
    Elf_Word st_size;
    unsigned char st_info;
    unsigned char st_other;
    Elf_Half st_shndx;
} Elf_sym;

#ifdef ARCH_MM_MMU
struct map_range
{
    void *start;
    size_t size;
};
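
/**
 * @brief Grow the range described by m so that it also covers
 * [start, start + size). An empty range is simply initialized to the new span.
 */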
static void expand_map_range(struct map_range *m, void *start, size_t size)
{
    if (!m->start)
    {
        m->start = start;
        m->size = size;
    }
    else
    {
        void *end = (void *)((char *)start + size);
        void *mend = (void *)((char *)m->start + m->size);
        if (m->start > start)
        {
            m->start = start;
        }
        if (mend < end)
        {
            mend = end;
        }
        m->size = (char *)mend - (char *)m->start;
    }
}
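
/**
 * @brief Page-align both ranges and verify that each lies inside the user
 * load area (between USER_LOAD_VADDR and USER_STACK_VSTART) and that the two
 * ranges do not overlap. Returns 0 if the layout is acceptable, -1 otherwise.
 */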
static int map_range_check(struct map_range *m1, struct map_range *m2)
{
    void *m1_start = (void *)((size_t)m1->start & ~ARCH_PAGE_MASK);
    void *m1_end = (void *)((((size_t)m1->start + m1->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);
    void *m2_start = (void *)((size_t)m2->start & ~ARCH_PAGE_MASK);
    void *m2_end = (void *)((((size_t)m2->start + m2->size) + ARCH_PAGE_MASK) & ~ARCH_PAGE_MASK);

    if (m1->size)
    {
        if (m1_start < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m1_start > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
        if (m1_end < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m1_end > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
    }
    if (m2->size)
    {
        if (m2_start < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m2_start > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
        if (m2_end < (void *)USER_LOAD_VADDR)
        {
            return -1;
        }
        if (m2_end > (void *)USER_STACK_VSTART)
        {
            return -1;
        }
    }
    if ((m1->size != 0) && (m2->size != 0))
    {
        if (m1_start < m2_start)
        {
            if (m1_end > m2_start)
            {
                return -1;
            }
        }
        else /* m2_start <= m1_start */
        {
            if (m2_end > m1_start)
            {
                return -1;
            }
        }
    }
    return 0;
}
#endif
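
/**
 * @brief Load an ELF image from fd into the lwp address space: validate the
 * ELF header, fill in the auxiliary vector, map (or allocate) the text/data
 * areas, copy in the PT_LOAD segments, zero the BSS part and, for ET_DYN
 * images, apply the relocations. Returns RT_EOK on success, 1 if the image
 * requires the dynamic loader (a PT_DYNAMIC segment is present), or a
 * negative error code on failure.
 */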
static int load_elf(int fd, int len, struct rt_lwp *lwp, uint8_t *load_addr, struct process_aux *aux)
{
    uint32_t i;
    uint32_t off = 0;
    size_t load_off = 0;
    char *p_section_str = 0;
    Elf_sym *dynsym = 0;
    Elf_Ehdr eheader;
    Elf_Phdr pheader;
    Elf_Shdr sheader;
    int result = RT_EOK;
    uint32_t magic;
    size_t read_len;
    void *got_start = 0;
    size_t got_size = 0;
    void *rel_dyn_start = 0;
    size_t rel_dyn_size = 0;
    size_t dynsym_off = 0;
    size_t dynsym_size = 0;
#ifdef ARCH_MM_MMU
    struct map_range user_area[2] = {{NULL, 0}, {NULL, 0}}; /* 0 is text, 1 is data */
    void *pa, *va;
    void *va_self;
#endif

    if (len < sizeof eheader)
    {
        LOG_E("len < sizeof eheader!");
        return -RT_ERROR;
    }

    lseek(fd, 0, SEEK_SET);
    read_len = load_fread(&magic, 1, sizeof magic, fd);
    check_read(read_len, sizeof magic);

    if (memcmp(elf_magic, &magic, 4) != 0)
    {
        LOG_E("elf_magic not same, magic:0x%x!", magic);
        return -RT_ERROR;
    }

    lseek(fd, off, SEEK_SET);
    read_len = load_fread(&eheader, 1, sizeof eheader, fd);
    check_read(read_len, sizeof eheader);

#ifndef ARCH_CPU_64BIT
    if (eheader.e_ident[4] != 1)
    { /* not 32bit */
        LOG_E("elf not 32bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#else
    if (eheader.e_ident[4] != 2)
    { /* not 64bit */
        LOG_E("elf not 64bit, %d!", eheader.e_ident[4]);
        return -RT_ERROR;
    }
#endif

    if (eheader.e_ident[6] != 1)
    { /* ver not 1 */
        LOG_E("elf Version not 1, ver:%d!", eheader.e_ident[6]);
        return -RT_ERROR;
    }

    if ((eheader.e_type != ET_DYN)
#ifdef ARCH_MM_MMU
        && (eheader.e_type != ET_EXEC)
#endif
    )
    {
        /* not pie or exec elf */
        LOG_E("elf type not pie or exec, type:%d!", eheader.e_type);
        return -RT_ERROR;
    }

#ifdef ARCH_MM_MMU
    {
        off = eheader.e_phoff;
        for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&pheader, 1, sizeof pheader, fd);
            check_read(read_len, sizeof pheader);

            if (pheader.p_type == PT_DYNAMIC)
            {
                /* load ld.so */
                return 1; /* 1 means dynamic */
            }
        }
    }
#endif

    if (eheader.e_entry != 0)
    {
        if ((eheader.e_entry != USER_LOAD_VADDR)
            && (eheader.e_entry != LDSO_LOAD_VADDR))
        {
            /* the entry is invalid */
            LOG_E("elf entry is invalid, entry:0x%x!", eheader.e_entry);
            return -RT_ERROR;
        }
    }

    { /* load aux */
        uint8_t *process_header;
        size_t process_header_size;

        off = eheader.e_phoff;
        process_header_size = eheader.e_phnum * sizeof pheader;
#ifdef ARCH_MM_MMU
        if (process_header_size > ARCH_PAGE_SIZE - sizeof(char[16]))
        {
            LOG_E("process_header_size too big, size:0x%x!", process_header_size);
            return -RT_ERROR;
        }
        va = (uint8_t *)lwp_map_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE * 2), process_header_size, 0);
        if (!va)
        {
            LOG_E("lwp map user failed!");
            return -RT_ERROR;
        }
        pa = lwp_v2p(lwp, va);
        process_header = (uint8_t *)pa - PV_OFFSET;
#else
        process_header = (uint8_t *)rt_malloc(process_header_size + sizeof(char[16]));
        if (!process_header)
        {
            LOG_E("process_header malloc failed, size:0x%x!", process_header_size + sizeof(char[16]));
            return -RT_ERROR;
        }
#endif
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(process_header, 1, process_header_size, fd);
        check_read(read_len, process_header_size);
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, process_header, process_header_size);
#endif

        aux->item[1].key = AT_PAGESZ;
#ifdef ARCH_MM_MMU
        aux->item[1].value = ARCH_PAGE_SIZE;
#else
        aux->item[1].value = RT_MM_PAGE_SIZE;
#endif
        aux->item[2].key = AT_RANDOM;
        {
            uint32_t random_value = rt_tick_get();
            uint8_t *random;
#ifdef ARCH_MM_MMU
            uint8_t *krandom;

            random = (uint8_t *)(USER_VADDR_TOP - ARCH_PAGE_SIZE - sizeof(char[16]));

            krandom = (uint8_t *)lwp_v2p(lwp, random);
            krandom = (uint8_t *)krandom - PV_OFFSET;
            rt_memcpy(krandom, &random_value, sizeof random_value);
#else
            random = (uint8_t *)(process_header + process_header_size);
            rt_memcpy(random, &random_value, sizeof random_value);
#endif
            aux->item[2].value = (size_t)random;
        }
        aux->item[3].key = AT_PHDR;
#ifdef ARCH_MM_MMU
        aux->item[3].value = (size_t)va;
#else
        aux->item[3].value = (size_t)process_header;
#endif
        aux->item[4].key = AT_PHNUM;
        aux->item[4].value = eheader.e_phnum;
        aux->item[5].key = AT_PHENT;
        aux->item[5].value = sizeof pheader;
#ifdef ARCH_MM_MMU
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, aux, sizeof *aux);
#endif
    }

    if (load_addr)
    {
        load_off = (size_t)load_addr;
    }
#ifdef ARCH_MM_MMU
    else
    {
        /* map user */
        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
                if ((sheader.sh_flags & SHF_WRITE) == 0)
                {
                    expand_map_range(&user_area[0], (void *)sheader.sh_addr, sheader.sh_size);
                }
                else
                {
                    expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                }
                break;
            case SHT_NOBITS:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            default:
                expand_map_range(&user_area[1], (void *)sheader.sh_addr, sheader.sh_size);
                break;
            }
        }

        if (user_area[0].size == 0)
        {
            /* no code */
            result = -RT_ERROR;
            goto _exit;
        }

        if (user_area[0].start == NULL)
        {
            /* DYN */
            load_off = USER_LOAD_VADDR;
            user_area[0].start = (void *)((char *)user_area[0].start + load_off);
            user_area[1].start = (void *)((char *)user_area[1].start + load_off);
        }

        if (map_range_check(&user_area[0], &user_area[1]) != 0)
        {
            result = -RT_ERROR;
            goto _exit;
        }

        /* text and data */
        for (i = 0; i < 2; i++)
        {
            if (user_area[i].size != 0)
            {
                va = lwp_map_user(lwp, user_area[i].start, user_area[i].size, (i == 0));
                if (!va || (va != user_area[i].start))
                {
                    result = -RT_ERROR;
                    goto _exit;
                }
            }
        }
        lwp->text_size = user_area[0].size;
    }
#else
    else
    {
        size_t start = -1UL;
        size_t end = 0UL;
        size_t total_size;

        off = eheader.e_shoff;
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            check_off(off, len);
            lseek(fd, off, SEEK_SET);
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if ((sheader.sh_flags & SHF_ALLOC) == 0)
            {
                continue;
            }

            switch (sheader.sh_type)
            {
            case SHT_PROGBITS:
            case SHT_NOBITS:
                if (start > sheader.sh_addr)
                {
                    start = sheader.sh_addr;
                }
                if (sheader.sh_addr + sheader.sh_size > end)
                {
                    end = sheader.sh_addr + sheader.sh_size;
                }
                break;
            default:
                break;
            }
        }

        total_size = end - start;
#ifdef RT_USING_CACHE
        load_off = (size_t)rt_malloc_align(total_size, RT_CPU_CACHE_LINE_SZ);
#else
        load_off = (size_t)rt_malloc(total_size);
#endif
        if (load_off == 0)
        {
            LOG_E("alloc text memory failed!");
            result = -RT_ENOMEM;
            goto _exit;
        }
        else
        {
            LOG_D("lwp text malloc : %p, size: %d!", (void *)load_off, lwp->text_size);
        }

        lwp->load_off = load_off; /* for free */
        lwp->text_size = total_size;
    }
#endif

    lwp->text_entry = (void *)(eheader.e_entry + load_off);

    off = eheader.e_phoff;
    for (i = 0; i < eheader.e_phnum; i++, off += sizeof pheader)
    {
        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        read_len = load_fread(&pheader, 1, sizeof pheader, fd);
        check_read(read_len, sizeof pheader);

        if (pheader.p_type == PT_LOAD)
        {
            if (pheader.p_filesz > pheader.p_memsz)
            {
                LOG_E("pheader.p_filesz > pheader.p_memsz, p_filesz:0x%x;p_memsz:0x%x!", pheader.p_filesz, pheader.p_memsz);
                return -RT_ERROR;
            }

            check_off(pheader.p_offset, len);
            lseek(fd, pheader.p_offset, SEEK_SET);
#ifdef ARCH_MM_MMU
            {
                uint32_t size = pheader.p_filesz;
                size_t tmp_len = 0;

                va = (void *)(pheader.p_vaddr + load_addr);
                read_len = 0;
                while (size)
                {
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    LOG_D("va_self = %p pa = %p", va_self, pa);
                    tmp_len = (size < ARCH_PAGE_SIZE) ? size : ARCH_PAGE_SIZE;
                    tmp_len = load_fread(va_self, 1, tmp_len, fd);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, va_self, tmp_len);
                    read_len += tmp_len;
                    size -= tmp_len;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
            }
#else
            read_len = load_fread((void *)(pheader.p_vaddr + load_off), 1, pheader.p_filesz, fd);
#endif
            check_read(read_len, pheader.p_filesz);

            if (pheader.p_filesz < pheader.p_memsz)
            {
#ifdef ARCH_MM_MMU
                uint32_t size = pheader.p_memsz - pheader.p_filesz;
                uint32_t size_s;
                uint32_t off;

                off = pheader.p_filesz & ARCH_PAGE_MASK;
                va = (void *)((pheader.p_vaddr + pheader.p_filesz + load_off) & ~ARCH_PAGE_MASK);
                while (size)
                {
                    size_s = (size < ARCH_PAGE_SIZE - off) ? size : ARCH_PAGE_SIZE - off;
                    pa = lwp_v2p(lwp, va);
                    va_self = (void *)((char *)pa - PV_OFFSET);
                    memset((void *)((char *)va_self + off), 0, size_s);
                    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)((char *)va_self + off), size_s);
                    off = 0;
                    size -= size_s;
                    va = (void *)((char *)va + ARCH_PAGE_SIZE);
                }
#else
                memset((uint8_t *)pheader.p_vaddr + pheader.p_filesz + load_off, 0, (size_t)(pheader.p_memsz - pheader.p_filesz));
#endif
            }
        }
    }

    /* relocate */
    if (eheader.e_type == ET_DYN)
    {
        /* section info */
        off = eheader.e_shoff;
        /* find section string table */
        check_off(off, len);
        lseek(fd, off + (sizeof sheader) * eheader.e_shstrndx, SEEK_SET);
        read_len = load_fread(&sheader, 1, sizeof sheader, fd);
        check_read(read_len, sizeof sheader);

        p_section_str = (char *)rt_malloc(sheader.sh_size);
        if (!p_section_str)
        {
            LOG_E("out of memory!");
            result = -ENOMEM;
            goto _exit;
        }

        check_off(sheader.sh_offset, len);
        lseek(fd, sheader.sh_offset, SEEK_SET);
        read_len = load_fread(p_section_str, 1, sheader.sh_size, fd);
        check_read(read_len, sheader.sh_size);

        check_off(off, len);
        lseek(fd, off, SEEK_SET);
        for (i = 0; i < eheader.e_shnum; i++, off += sizeof sheader)
        {
            read_len = load_fread(&sheader, 1, sizeof sheader, fd);
            check_read(read_len, sizeof sheader);

            if (strcmp(p_section_str + sheader.sh_name, ".got") == 0)
            {
                got_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                got_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".rel.dyn") == 0)
            {
                rel_dyn_start = (void *)((uint8_t *)sheader.sh_addr + load_off);
                rel_dyn_size = (size_t)sheader.sh_size;
            }
            else if (strcmp(p_section_str + sheader.sh_name, ".dynsym") == 0)
            {
                dynsym_off = (size_t)sheader.sh_offset;
                dynsym_size = (size_t)sheader.sh_size;
            }
        }

        /* reloc */
        if (dynsym_size)
        {
            dynsym = rt_malloc(dynsym_size);
            if (!dynsym)
            {
                LOG_E("ERROR: Malloc error!");
                result = -ENOMEM;
                goto _exit;
            }
            check_off(dynsym_off, len);
            lseek(fd, dynsym_off, SEEK_SET);
            read_len = load_fread(dynsym, 1, dynsym_size, fd);
            check_read(read_len, dynsym_size);
        }
#ifdef ARCH_MM_MMU
        arch_elf_reloc(lwp->aspace, (void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);
#else
        arch_elf_reloc((void *)load_off, rel_dyn_start, rel_dyn_size, got_start, got_size, dynsym);

        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, lwp->text_size);
        rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, lwp->text_entry, lwp->text_size);
#endif
    }

    LOG_D("lwp->text_entry = 0x%p", lwp->text_entry);
    LOG_D("lwp->text_size = 0x%p", lwp->text_size);

_exit:
    if (dynsym)
    {
        rt_free(dynsym);
    }
    if (p_section_str)
    {
        rt_free(p_section_str);
    }
    if (result != RT_EOK)
    {
        LOG_E("lwp load failed, %d", result);
    }
    return result;
}
#endif /* ARCH_MM_MMU */
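
/**
 * @brief Open the executable named by filename and load it into the given
 * lwp. A non-NULL load_addr selects a fixed load address (LWP_TYPE_FIX_ADDR),
 * otherwise a dynamic address is used. Returns the load_elf() result, or a
 * negative value if the file cannot be opened or its size cannot be read.
 */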
rt_weak int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_t addr_size, struct process_aux *aux)
{
    uint8_t *ptr;
    int ret = -1;
    int len;
    int fd = -1;

    /* check file name */
    RT_ASSERT(filename != RT_NULL);
    /* check lwp control block */
    RT_ASSERT(lwp != RT_NULL);

    /* copy file name to process name */
    rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);

    if (load_addr != RT_NULL)
    {
        lwp->lwp_type = LWP_TYPE_FIX_ADDR;
        ptr = load_addr;
    }
    else
    {
        lwp->lwp_type = LWP_TYPE_DYN_ADDR;
        ptr = RT_NULL;
    }

    fd = open(filename, O_BINARY | O_RDONLY, 0);
    if (fd < 0)
    {
        LOG_E("ERROR: Can't open elf file %s!", filename);
        goto out;
    }
    len = lseek(fd, 0, SEEK_END);
    if (len < 0)
    {
        LOG_E("ERROR: File %s size error!", filename);
        goto out;
    }

    lseek(fd, 0, SEEK_SET);

    ret = load_elf(fd, len, lwp, ptr, aux);
    if ((ret != RT_EOK) && (ret != 1))
    {
        LOG_E("lwp load ret = %d", ret);
    }

out:
    if (fd >= 0)
    {
        close(fd);
    }
    return ret;
}

/* lwp-thread clean up routine */
void lwp_cleanup(struct rt_thread *tid)
{
    struct rt_lwp *lwp;

    if (tid == NULL)
    {
        LOG_I("%s: invalid parameter tid == NULL", __func__);
        return;
    }
    else
        LOG_D("cleanup thread: %s, stack_addr: 0x%x", tid->parent.name, tid->stack_addr);

    /**
     * Brief: lwp thread cleanup
     *
     * Note: Critical Section
     * - thread control block (RW. It's ensured that no one else can access the
     *   tcb other than the thread itself)
     */
    lwp = (struct rt_lwp *)tid->lwp;
    lwp_thread_signal_detach(&tid->signal);

    /* tty will be released in lwp_ref_dec() if the ref is cleared */
    lwp_ref_dec(lwp);

    return;
}

static void lwp_execve_setup_stdio(struct rt_lwp *lwp)
{
    struct dfs_fdtable *lwp_fdt;
    struct dfs_file *cons_file;
    int cons_fd;

    lwp_fdt = &lwp->fdt;

    /* open console */
    cons_fd = open("/dev/console", O_RDWR);
    if (cons_fd < 0)
    {
        LOG_E("%s: Cannot open console tty", __func__);
        return;
    }
    LOG_D("%s: open console as fd %d", __func__, cons_fd);

    /* init 4 fds */
    lwp_fdt->fds = rt_calloc(4, sizeof(void *));
    if (lwp_fdt->fds)
    {
        cons_file = fd_get(cons_fd);
        lwp_fdt->maxfd = 4;
        fdt_fd_associate_file(lwp_fdt, 0, cons_file);
        fdt_fd_associate_file(lwp_fdt, 1, cons_file);
        fdt_fd_associate_file(lwp_fdt, 2, cons_file);
    }

    close(cons_fd);
    return;
}
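
/**
 * @brief Entry of the first thread of a new process: install the cleanup
 * handler, plant the debug instruction at the text entry when debugging is
 * requested, invalidate the icache, and finally switch to user mode at
 * lwp->text_entry.
 */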
static void _lwp_thread_entry(void *parameter)
{
    rt_thread_t tid;
    struct rt_lwp *lwp;

    tid = rt_thread_self();
    lwp = (struct rt_lwp *)tid->lwp;
    tid->cleanup = lwp_cleanup;
    tid->user_stack = RT_NULL;

    if (lwp->debug)
    {
        lwp->bak_first_inst = *(uint32_t *)lwp->text_entry;
        *(uint32_t *)lwp->text_entry = dbg_get_ins();
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, lwp->text_entry, sizeof(uint32_t));
        icache_invalid_all();
    }

    /**
     * Without ASID support, there is a special case when an application runs
     * and exits multiple times and the same page frame allocated to it is
     * bound to different text segments. We then end up with an icache that
     * contains out-of-date data, which must be handled by the running core
     * itself. With ASID support, this should be a rare case in which both the
     * ASID and the page frame are identical to those of a previously running
     * application.
     *
     * For a new application loaded into memory, the icache is seen as empty,
     * and there should be nothing in the icache entries to match. So this
     * icache invalidation should have barely any influence.
     */
    rt_hw_icache_invalidate_all();

#ifdef ARCH_MM_MMU
    arch_start_umode(lwp->args, lwp->text_entry, (void *)USER_STACK_VEND, (char *)tid->stack_addr + tid->stack_size);
#else
    arch_start_umode(lwp->args, lwp->text_entry, lwp->data_entry, (void *)((uint32_t)lwp->data_entry + lwp->data_size));
#endif /* ARCH_MM_MMU */
}

struct rt_lwp *lwp_self(void)
{
    rt_thread_t tid;

    tid = rt_thread_self();
    if (tid)
    {
        return (struct rt_lwp *)tid->lwp;
    }

    return RT_NULL;
}

rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child)
{
    /* lwp add to children link */
    LWP_LOCK(parent);
    child->sibling = parent->first_child;
    parent->first_child = child;
    child->parent = parent;
    LWP_UNLOCK(parent);

    LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);

    /* parent holds reference to child */
    lwp_ref_inc(parent);
    /* child holds reference to parent */
    lwp_ref_inc(child);
    return 0;
}

rt_err_t lwp_children_unregister(struct rt_lwp *parent, struct rt_lwp *child)
{
    struct rt_lwp **lwp_node;

    LWP_LOCK(parent);
    /* detach from children link */
    lwp_node = &parent->first_child;
    while (*lwp_node != child)
    {
        RT_ASSERT(*lwp_node != RT_NULL);
        lwp_node = &(*lwp_node)->sibling;
    }
    (*lwp_node) = child->sibling;
    child->parent = RT_NULL;
    LWP_UNLOCK(parent);

    LOG_D("%s(parent=%p, child=%p)", __func__, parent, child);

    lwp_ref_dec(child);
    lwp_ref_dec(parent);
    return 0;
}
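
/**
 * @brief Create a new lwp, load the executable given by filename, copy
 * argc/argv/envp into it, set up stdio, the parent/child, process group and
 * session links, then create and start the first user thread. Returns the new
 * pid on success or a negative errno-style value on failure.
 *
 * A typical call is made by lwp_startup() above:
 *     lwp_execve((void *)init_path, 0, sizeof(argv)/sizeof(argv[0]), argv, envp);
 */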
pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
{
    int result;
    struct rt_lwp *lwp;
    char *thread_name;
    struct process_aux *aux;
    int tid = 0;

    if (filename == RT_NULL)
    {
        return -EINVAL;
    }

    if (access(filename, X_OK) != 0)
    {
        return -EACCES;
    }

    lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID | LWP_CREATE_FLAG_NOTRACE_EXEC);
    if (lwp == RT_NULL)
    {
        dbg_log(DBG_ERROR, "lwp struct out of memory!\n");
        return -ENOMEM;
    }
    LOG_D("lwp malloc : %p, size: %d!", lwp, sizeof(struct rt_lwp));

    if ((tid = lwp_tid_get()) == 0)
    {
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }
#ifdef ARCH_MM_MMU
    if (lwp_user_space_init(lwp, 0) != 0)
    {
        lwp_tid_put(tid);
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }
#endif

    if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
    {
        lwp_tid_put(tid);
        lwp_ref_dec(lwp);
        return -ENOMEM;
    }

    result = lwp_load(filename, lwp, RT_NULL, 0, aux);
#ifdef ARCH_MM_MMU
    if (result == 1)
    {
        /* dynamic */
        lwp_unmap_user(lwp, (void *)(USER_VADDR_TOP - ARCH_PAGE_SIZE));
        result = load_ldso(lwp, filename, argv, envp);
    }
#endif /* ARCH_MM_MMU */

    if (result == RT_EOK)
    {
        rt_thread_t thread = RT_NULL;
        rt_uint32_t priority = 25, tick = 200;

        lwp_execve_setup_stdio(lwp);

        /* obtain the base name */
        thread_name = strrchr(filename, '/');
        thread_name = thread_name ? thread_name + 1 : filename;
#ifndef ARCH_MM_MMU
        struct lwp_app_head *app_head = lwp->text_entry;
        if (app_head->priority)
        {
            priority = app_head->priority;
        }
        if (app_head->tick)
        {
            tick = app_head->tick;
        }
#endif /* not defined ARCH_MM_MMU */
        thread = rt_thread_create(thread_name, _lwp_thread_entry, RT_NULL,
                                  LWP_TASK_STACK_SIZE, priority, tick);
        if (thread != RT_NULL)
        {
            struct rt_lwp *self_lwp;
            rt_session_t session;
            rt_processgroup_t group;

            thread->tid = tid;
            lwp_tid_set_thread(tid, thread);
            LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
                  (rt_size_t)thread->stack_addr + thread->stack_size);
            self_lwp = lwp_self();

            /* when create init, self_lwp == null */
            if (self_lwp == RT_NULL && lwp_to_pid(lwp) != 1)
            {
                self_lwp = lwp_from_pid_and_lock(1);
            }

            if (self_lwp)
            {
                /* lwp add to children link */
                lwp_children_register(self_lwp, lwp);
            }

            session = RT_NULL;
            group = RT_NULL;

            group = lwp_pgrp_create(lwp);
            if (group)
            {
                lwp_pgrp_insert(group, lwp);
                if (self_lwp == RT_NULL)
                {
                    session = lwp_session_create(lwp);
                    lwp_session_insert(session, group);
                }
                else
                {
                    session = lwp_session_find(lwp_sid_get_byprocess(self_lwp));
                    lwp_session_insert(session, group);
                }
            }

            thread->lwp = lwp;
#ifndef ARCH_MM_MMU
            struct lwp_app_head *app_head = (struct lwp_app_head *)lwp->text_entry;
            thread->user_stack = app_head->stack_offset ?
                                 (void *)(app_head->stack_offset -
                                          app_head->data_offset +
                                          (uint32_t)lwp->data_entry) : RT_NULL;
            thread->user_stack_size = app_head->stack_size;

            /* init data area */
            rt_memset(lwp->data_entry, 0, lwp->data_size);
            /* init user stack */
            rt_memset(thread->user_stack, '#', thread->user_stack_size);
#endif /* not defined ARCH_MM_MMU */
            rt_list_insert_after(&lwp->t_grp, &thread->sibling);

            lwp->did_exec = RT_TRUE;

            if (debug && rt_dbg_ops)
            {
                lwp->debug = debug;
                rt_thread_control(thread, RT_THREAD_CTRL_BIND_CPU, (void *)0);
            }
            rt_thread_startup(thread);
            return lwp_to_pid(lwp);
        }
    }

    lwp_tid_put(tid);
    lwp_ref_dec(lwp);

    return -RT_ERROR;
}

#ifdef RT_USING_MUSLLIBC
extern char **__environ;
#else
char **__environ = 0;
#endif

pid_t exec(char *filename, int debug, int argc, char **argv)
{
    setenv("OS", "RT-Thread", 1);
    return lwp_execve(filename, debug, argc, argv, __environ);
}
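
/**
 * @brief Save and restore the per-thread user settings around a context
 * switch: the architecture thread ID register (thread_idr) and, when a
 * debugger backend (rt_dbg_ops) is present, the hardware process id and the
 * single-step state.
 */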
#ifdef ARCH_MM_MMU
void lwp_user_setting_save(rt_thread_t thread)
{
    if (thread)
    {
        thread->thread_idr = arch_get_tidr();
    }
}

void lwp_user_setting_restore(rt_thread_t thread)
{
    if (!thread)
    {
        return;
    }
#if !defined(ARCH_RISCV64)
    /* tidr will be set in RESTORE_ALL in risc-v */
    arch_set_tidr(thread->thread_idr);
#endif

    if (rt_dbg_ops)
    {
        struct rt_lwp *l = (struct rt_lwp *)thread->lwp;

        if (l != 0)
        {
            rt_hw_set_process_id((size_t)l->pid);
        }
        else
        {
            rt_hw_set_process_id(0);
        }
        if (l && l->debug)
        {
            uint32_t step_type = 0;

            step_type = dbg_step_type();
            if ((step_type == 2) || (thread->step_exec && (step_type == 1)))
            {
                dbg_activate_step();
            }
            else
            {
                dbg_deactivate_step();
            }
        }
    }
}
#endif /* ARCH_MM_MMU */

void lwp_uthread_ctx_save(void *ctx)
{
    rt_thread_t thread;
    thread = rt_thread_self();
    thread->user_ctx.ctx = ctx;
}

void lwp_uthread_ctx_restore(void)
{
    rt_thread_t thread;
    thread = rt_thread_self();
    thread->user_ctx.ctx = RT_NULL;
}
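
/**
 * @brief Print a user-thread backtrace in a form usable with addr2line: the
 * command line (or lwp->cmd as a fallback) followed by up to
 * RT_BACKTRACE_LEVEL_MAX_NR frame PCs unwound via rt_hw_backtrace_frame_unwind().
 */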
rt_err_t lwp_backtrace_frame(rt_thread_t uthread, struct rt_hw_backtrace_frame *frame)
{
    rt_err_t rc = -RT_ERROR;
    long nesting = 0;
    char **argv;
    rt_lwp_t lwp;

    if (uthread && uthread->lwp && rt_scheduler_is_available())
    {
        lwp = uthread->lwp;
        argv = lwp_get_command_line_args(lwp);
        if (argv)
        {
            rt_kprintf("please use: addr2line -e %s -a -f", argv[0]);
            lwp_free_command_line_args(argv);
        }
        else
        {
            rt_kprintf("please use: addr2line -e %s -a -f", lwp->cmd);
        }

        while (nesting < RT_BACKTRACE_LEVEL_MAX_NR)
        {
            rt_kprintf(" 0x%lx", frame->pc);
            if (rt_hw_backtrace_frame_unwind(uthread, frame))
            {
                break;
            }
            nesting++;
        }
        rt_kprintf("\n");
        rc = RT_EOK;
    }
    return rc;
}
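
/**
 * @brief Tick hook that accounts one tick of CPU time to the current thread
 * and, when RT_USING_SMP is enabled, to the per-CPU user/system/idle counters.
 */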
void rt_update_process_times(void)
{
    struct rt_thread *thread;
#ifdef RT_USING_SMP
    struct rt_cpu *pcpu;

    pcpu = rt_cpu_self();
#endif

    thread = rt_thread_self();
    if (!IS_USER_MODE(thread))
    {
        thread->user_time += 1;
#ifdef RT_USING_SMP
        pcpu->cpu_stat.user += 1;
#endif
    }
    else
    {
        thread->system_time += 1;
#ifdef RT_USING_SMP
        if (thread == pcpu->idle_thread)
        {
            pcpu->cpu_stat.idle += 1;
        }
        else
        {
            pcpu->cpu_stat.system += 1;
        }
#endif
    }
}