mmu.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-01-30     lizhirui     first version
 */

#include <rtthread.h>
#include <rthw.h>

#ifdef RT_USING_SMART

#include <board.h>
#include <page.h>
#include <stdlib.h>
#include <string.h>
#include <lwp_mm.h>
#include <cache.h>

#define DBG_TAG "mmu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "riscv.h"
#include "riscv_mmu.h"
#include "mmu.h"

void *current_mmu_table = RT_NULL;

volatile rt_ubase_t MMUTable[__SIZE(VPN2_BIT)] __attribute__((aligned(4 * 1024)));
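
/* Drop stale translations after the page table has been modified:
 * re-write satp with its current value and flush the whole TLB. */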
static void rt_hw_cpu_tlb_invalidate()
{
    rt_size_t satpv = read_csr(satp);
    write_csr(satp, satpv);
    mmu_flush_tlb();
}

void *rt_hw_mmu_tbl_get()
{
    return current_mmu_table;
}
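
/* Switch to another page table: record it, load it into satp, then clean the
 * D-cache and invalidate the I-cache so the new mappings are observed
 * consistently. */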
void rt_hw_mmu_switch(void *mmu_table)
{
    current_mmu_table = mmu_table;
    RT_ASSERT(__CHECKALIGN(mmu_table, PAGE_OFFSET_BIT));
    mmu_set_pagetable((rt_ubase_t)mmu_table);
    rt_hw_cpu_dcache_clean_all();
    rt_hw_cpu_icache_invalidate_all();
}
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void *v_address, rt_size_t size, rt_size_t *vtable, rt_size_t pv_off)
{
    size_t l1_off, va_s, va_e;
    rt_base_t level;

    if ((!mmu_info) || (!vtable))
    {
        return -1;
    }

    va_s = (rt_size_t)v_address;
    va_e = ((rt_size_t)v_address) + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    // convert the addresses to L1 (VPN2) indices
    va_s = GET_L1(va_s);
    va_e = GET_L1(va_e);

    if (va_s == 0)
    {
        return -1;
    }

    rt_mm_lock();
    // check that the requested L1 entries are not already in use
    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v)
        {
            rt_mm_unlock();
            return -1;
        }
    }
    rt_mm_unlock();

    mmu_info->vtable = vtable;
    mmu_info->vstart = va_s;
    mmu_info->vend = va_e;
    mmu_info->pv_off = pv_off;
    return 0;
}
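
/*
 * Usage sketch (illustrative only, not taken from this file): a user address
 * space is typically described by an rt_mmu_info whose vtable points at a
 * page-aligned top-level table. The names `lwp_mmu_info`, `user_page_table`,
 * USER_VADDR_START, USER_VADDR_TOP and PV_OFFSET below are hypothetical
 * placeholders for values that would come from the lwp/board configuration.
 *
 *     static rt_size_t user_page_table[__SIZE(VPN2_BIT)]
 *         __attribute__((aligned(4 * 1024)));
 *     static rt_mmu_info lwp_mmu_info;
 *
 *     // Manage the range starting at USER_VADDR_START; returns -1 if the
 *     // range wraps, touches L1 index 0, or overlaps entries already in use.
 *     if (rt_hw_mmu_map_init(&lwp_mmu_info, (void *)USER_VADDR_START,
 *                            USER_VADDR_TOP - USER_VADDR_START,
 *                            user_page_table, PV_OFFSET) != 0)
 *     {
 *         LOG_E("mmu map init failed");
 *     }
 */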
void rt_hw_mmu_kernel_map_init(rt_mmu_info *mmu_info, rt_size_t vaddr_start, rt_size_t size)
{
    rt_size_t paddr_start = __UMASKVALUE(VPN_TO_PPN(vaddr_start, mmu_info->pv_off), PAGE_OFFSET_MASK);
    rt_size_t va_s = GET_L1(vaddr_start);
    rt_size_t va_e = GET_L1(vaddr_start + size - 1);
    rt_size_t i;

    for (i = va_s; i <= va_e; i++)
    {
        mmu_info->vtable[i] = COMBINEPTE(paddr_start, PAGE_ATTR_RWX | PTE_G | PTE_V);
        paddr_start += L1_PAGE_SIZE;
    }

    rt_hw_cpu_tlb_invalidate();
}
// find a free virtual address range large enough for the requested number of pages
static size_t find_vaddr(rt_mmu_info *mmu_info, int pages)
{
    size_t loop_pages;
    size_t va;
    size_t find_va = 0;
    int n = 0;
    size_t i;

    if (!pages || !mmu_info)
    {
        return 0;
    }

    loop_pages = (mmu_info->vend - mmu_info->vstart) ? (mmu_info->vend - mmu_info->vstart) : 1;
    loop_pages <<= (ARCH_INDEX_WIDTH * 2);
    va = mmu_info->vstart;
    va <<= (ARCH_PAGE_SHIFT + ARCH_INDEX_WIDTH * 2);

    for (i = 0; i < loop_pages; i++, va += ARCH_PAGE_SIZE)
    {
        if (_rt_hw_mmu_v2p(mmu_info, (void *)va))
        {
            // this page is already mapped, restart the search window
            n = 0;
            find_va = 0;
            continue;
        }

        if (!find_va)
        {
            find_va = va;
        }

        n++;

        if (n >= pages)
        {
            return find_va;
        }
    }

    return 0;
}
// check whether the given virtual address range is free
static int check_vaddr(rt_mmu_info *mmu_info, void *va, rt_size_t pages)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)va, PAGE_OFFSET_MASK);
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;

    if (!pages)
    {
        return -1;
    }

    if (!mmu_info)
    {
        return -1;
    }

    while (pages--)
    {
        l1_off = GET_L1(loop_va);
        l2_off = GET_L2(loop_va);
        l3_off = GET_L3(loop_va);
        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

        if (PTE_USED(*mmu_l1))
        {
            RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
            mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off) + l2_off;

            if (PTE_USED(*mmu_l2))
            {
                RT_ASSERT(!PAGE_IS_LEAF(*mmu_l2));
                mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l2), mmu_info->pv_off) + l3_off;

                if (PTE_USED(*mmu_l3))
                {
                    RT_ASSERT(PAGE_IS_LEAF(*mmu_l3));
                    return -1;
                }
            }
        }

        loop_va += PAGE_SIZE;
    }

    return 0;
}
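
/*
 * Tear down `npages` 4 KB leaf mappings starting at v_addr. Each cleared leaf
 * drops one reference on its L3 table page; when only the table's own
 * reference remains (rt_page_ref_get() == 1), the table page is freed and the
 * corresponding entry in the parent level is cleared as well. The physical
 * pages that were mapped are not freed here.
 */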
static void __rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;

    RT_ASSERT(mmu_info);

    while (npages--)
    {
        l1_off = (rt_size_t)GET_L1(loop_va);
        RT_ASSERT((l1_off >= mmu_info->vstart) && (l1_off <= mmu_info->vend));
        l2_off = (rt_size_t)GET_L2(loop_va);
        l3_off = (rt_size_t)GET_L3(loop_va);

        mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;
        RT_ASSERT(PTE_USED(*mmu_l1));
        RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
        mmu_l2 = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off)) + l2_off;
        RT_ASSERT(PTE_USED(*mmu_l2));
        RT_ASSERT(!PAGE_IS_LEAF(*mmu_l2));
        mmu_l3 = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l2), mmu_info->pv_off)) + l3_off;
        RT_ASSERT(PTE_USED(*mmu_l3));
        RT_ASSERT(PAGE_IS_LEAF(*mmu_l3));

        *mmu_l3 = 0;
        rt_hw_cpu_dcache_clean(mmu_l3, sizeof(*mmu_l3));

        // drop the reference the leaf mapping held on the l3 table page
        mmu_l3 -= l3_off;
        rt_pages_free(mmu_l3, 0);

        int free = rt_page_ref_get(mmu_l3, 0);
        if (free == 1)
        {
            // only the table's own reference is left: free the l3 table page
            rt_pages_free(mmu_l3, 0);
            *mmu_l2 = 0;
            rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));

            // drop the reference the l3 table held on the l2 table page
            mmu_l2 -= l2_off;
            rt_pages_free(mmu_l2, 0);

            free = rt_page_ref_get(mmu_l2, 0);
            if (free == 1)
            {
                // free the l2 table page and clear its PTE in the l1 table
                rt_pages_free(mmu_l2, 0);
                *mmu_l1 = 0;
                rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
            }
        }

        loop_va += PAGE_SIZE;
    }
}
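
/*
 * Install a single 4 KB mapping va -> pa with the given attributes, walking
 * the three-level (Sv39) table. Missing L2/L3 table pages are allocated and
 * zeroed on demand; each new leaf takes a reference on its L3 table and each
 * new L3 table takes a reference on its L2 table, so __rt_hw_mmu_unmap can
 * release tables once they become empty. Returns 0 on success, -1 if a table
 * page cannot be allocated.
 */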
static int _mmu_map_one_page(rt_mmu_info *mmu_info, size_t va, size_t pa, size_t attr)
{
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;

    l1_off = GET_L1(va);
    l2_off = GET_L2(va);
    l3_off = GET_L3(va);

    mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

    if (PTE_USED(*mmu_l1))
    {
        RT_ASSERT(!PAGE_IS_LEAF(*mmu_l1));
        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off);
    }
    else
    {
        mmu_l2 = (rt_size_t *)rt_pages_alloc(0);

        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, PAGE_SIZE);
            rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE);
            *mmu_l1 = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l2, mmu_info->pv_off), PAGE_DEFAULT_ATTR_NEXT);
            rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
        }
        else
        {
            return -1;
        }
    }

    if (PTE_USED(*(mmu_l2 + l2_off)))
    {
        RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
        mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), mmu_info->pv_off);
    }
    else
    {
        mmu_l3 = (rt_size_t *)rt_pages_alloc(0);

        if (mmu_l3)
        {
            rt_memset(mmu_l3, 0, PAGE_SIZE);
            rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE);
            *(mmu_l2 + l2_off) = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l3, mmu_info->pv_off), PAGE_DEFAULT_ATTR_NEXT);
            rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
            // the new l3 table holds a reference on its parent l2 table
            rt_page_ref_inc((void *)mmu_l2, 0);
        }
        else
        {
            return -1;
        }
    }

    RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
    // the new leaf mapping holds a reference on its l3 table
    rt_page_ref_inc((void *)mmu_l3, 0);
    *(mmu_l3 + l3_off) = COMBINEPTE((rt_size_t)pa, attr);
    rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
    return 0;
}
static int __rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t npages, rt_size_t attr)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t loop_pa = __UMASKVALUE((rt_size_t)p_addr, PAGE_OFFSET_MASK);

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        if (_mmu_map_one_page(mmu_info, loop_va, loop_pa, attr) != 0)
        {
            __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
            return -1;
        }

        loop_va += PAGE_SIZE;
        loop_pa += PAGE_SIZE;
    }

    return 0;
}
void *_rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr)
{
    rt_size_t pa_s, pa_e;
    rt_size_t vaddr;
    rt_size_t pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    pa_s = (rt_size_t)p_addr;
    pa_e = ((rt_size_t)p_addr) + size - 1;
    pa_s = GET_PF_ID(pa_s);
    pa_e = GET_PF_ID(pa_e);
    pages = pa_e - pa_s + 1;

    if (v_addr)
    {
        vaddr = (rt_size_t)v_addr;
        pa_s = (rt_size_t)p_addr;

        if (GET_PF_OFFSET(vaddr) != GET_PF_OFFSET(pa_s))
        {
            return 0;
        }

        vaddr = __UMASKVALUE(vaddr, PAGE_OFFSET_MASK);

        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map(mmu_info, (void *)vaddr, p_addr, pages, attr);

        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr | GET_PF_OFFSET((rt_size_t)p_addr));
        }
    }

    return 0;
}
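
/*
 * Like __rt_hw_mmu_map, but the physical pages are allocated here (one 4 KB
 * page per mapping) instead of being supplied by the caller. On failure the
 * function walks the range again, freeing any page that was already mapped,
 * and then unmaps it.
 */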
static int __rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t npages, rt_size_t attr)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    rt_size_t loop_pa;
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
    rt_size_t *ref_cnt;
    rt_size_t i;
    void *va, *pa;

    if (!mmu_info)
    {
        return -1;
    }

    while (npages--)
    {
        loop_pa = (rt_size_t)rt_pages_alloc(0);

        if (!loop_pa)
        {
            goto err;
        }

        if (__rt_hw_mmu_map(mmu_info, (void *)loop_va, (void *)loop_pa, 1, attr) < 0)
        {
            goto err;
        }

        loop_va += PAGE_SIZE;
    }

    return 0;

err:
    va = (void *)__UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);

    for (i = 0; i < npages; i++)
    {
        pa = rt_hw_mmu_v2p(mmu_info, va);

        if (pa)
        {
            rt_pages_free((void *)PPN_TO_VPN(pa, mmu_info->pv_off), 0);
        }

        va = (void *)((rt_uint8_t *)va + PAGE_SIZE);
    }

    __rt_hw_mmu_unmap(mmu_info, v_addr, npages);
    return -1;
}
void *_rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr)
{
    rt_size_t vaddr;
    rt_size_t offset;
    rt_size_t pages;
    int ret;

    if (!size)
    {
        return 0;
    }

    offset = GET_PF_OFFSET((rt_size_t)v_addr);
    size += (offset + PAGE_SIZE - 1);
    pages = size >> PAGE_OFFSET_BIT;

    if (v_addr)
    {
        vaddr = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);

        if (check_vaddr(mmu_info, (void *)vaddr, pages) != 0)
        {
            return 0;
        }
    }
    else
    {
        vaddr = find_vaddr(mmu_info, pages);
    }

    if (vaddr)
    {
        ret = __rt_hw_mmu_map_auto(mmu_info, (void *)vaddr, pages, attr);

        if (ret == 0)
        {
            rt_hw_cpu_tlb_invalidate();
            return (void *)(vaddr | offset);
        }
    }

    return 0;
}
void _rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
{
    rt_size_t va_s, va_e;
    rt_size_t pages;

    va_s = ((rt_size_t)v_addr) >> PAGE_OFFSET_BIT;
    va_e = (((rt_size_t)v_addr) + size - 1) >> PAGE_OFFSET_BIT;
    pages = va_e - va_s + 1;
    __rt_hw_mmu_unmap(mmu_info, v_addr, pages);
    rt_hw_cpu_tlb_invalidate();
}
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void *p_addr, rt_size_t size, rt_size_t attr)
{
    void *ret;
    rt_base_t level;

    rt_mm_lock();
    ret = _rt_hw_mmu_map(mmu_info, v_addr, p_addr, size, attr);
    rt_mm_unlock();
    return ret;
}

void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size, rt_size_t attr)
{
    void *ret;
    rt_base_t level;

    rt_mm_lock();
    ret = _rt_hw_mmu_map_auto(mmu_info, v_addr, size, attr);
    rt_mm_unlock();
    return ret;
}
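
/*
 * Usage sketch (illustrative only): mapping an existing physical MMIO region
 * into kernel space, and letting the allocator pick both the virtual address
 * and the backing pages for an anonymous buffer. `kernel_mmu_info`,
 * UART0_PADDR and UART0_SIZE are hypothetical names, not defined in this file.
 *
 *     // map an existing physical range; returns the virtual address or 0
 *     void *uart_base = rt_hw_mmu_map(&kernel_mmu_info, RT_NULL,
 *                                     (void *)UART0_PADDR, UART0_SIZE,
 *                                     MMU_MAP_K_DEVICE);
 *
 *     // allocate and map fresh pages; attributes come from riscv_mmu.h
 *     void *buf = rt_hw_mmu_map_auto(&kernel_mmu_info, RT_NULL,
 *                                    16 * 1024, MMU_MAP_K_RWCB);
 *
 *     if (buf)
 *     {
 *         // tear the translation down again (physical pages are not freed here)
 *         rt_hw_mmu_unmap(&kernel_mmu_info, buf, 16 * 1024);
 *     }
 */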
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void *v_addr, rt_size_t size)
{
    rt_base_t level;

    rt_mm_lock();
    _rt_hw_mmu_unmap(mmu_info, v_addr, size);
    rt_mm_unlock();
}
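
/*
 * Translate a virtual address to its physical address by walking the table in
 * software. Leaf entries at L1 (1 GB) and L2 (2 MB) superpages are handled as
 * well as ordinary 4 KB L3 leaves; returns RT_NULL if no mapping exists.
 */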
void *_rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
    rt_size_t pa;

    l1_off = GET_L1((rt_size_t)v_addr);
    l2_off = GET_L2((rt_size_t)v_addr);
    l3_off = GET_L3((rt_size_t)v_addr);

    if (!mmu_info)
    {
        return RT_NULL;
    }

    mmu_l1 = ((rt_size_t *)mmu_info->vtable) + l1_off;

    if (PTE_USED(*mmu_l1))
    {
        if (*mmu_l1 & PTE_XWR_MASK)
        {
            return (void *)(GET_PADDR(*mmu_l1) | ((rt_size_t)v_addr & ((1 << 30) - 1)));
        }

        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), mmu_info->pv_off);

        if (PTE_USED(*(mmu_l2 + l2_off)))
        {
            if (*(mmu_l2 + l2_off) & PTE_XWR_MASK)
            {
                return (void *)(GET_PADDR(*(mmu_l2 + l2_off)) | ((rt_size_t)v_addr & ((1 << 21) - 1)));
            }

            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), mmu_info->pv_off);

            if (PTE_USED(*(mmu_l3 + l3_off)))
            {
                return (void *)(GET_PADDR(*(mmu_l3 + l3_off)) | GET_PF_OFFSET((rt_size_t)v_addr));
            }
        }
    }

    return RT_NULL;
}

void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void *v_addr)
{
    void *ret;
    rt_base_t level;

    rt_mm_lock();
    ret = _rt_hw_mmu_v2p(mmu_info, v_addr);
    rt_mm_unlock();
    return ret;
}
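
/*
 * Usage sketch (illustrative only): looking up the physical address behind a
 * kernel virtual address, e.g. before handing a buffer to a DMA engine.
 * `kernel_mmu_info` and `buf` are hypothetical.
 *
 *     void *pa = rt_hw_mmu_v2p(&kernel_mmu_info, buf);
 *
 *     if (pa == RT_NULL)
 *     {
 *         LOG_E("buffer %p is not mapped", buf);
 *     }
 */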
/**
 * @brief Set up the page table for kernel space. The mapping is fixed and
 * must not be changed after initialization.
 *
 * Each memory region in struct mem_desc must be page aligned; an unaligned
 * region fails to map and no error is reported.
 *
 * @param mmu_info kernel MMU descriptor whose vtable receives the mappings
 * @param mdesc array of memory region descriptors to map
 * @param desc_nr number of entries in mdesc
 */
void rt_hw_mmu_setup(rt_mmu_info *mmu_info, struct mem_desc *mdesc, int desc_nr)
{
    void *err;

    for (size_t i = 0; i < desc_nr; i++)
    {
        size_t attr;

        switch (mdesc->attr)
        {
            case NORMAL_MEM:
                attr = MMU_MAP_K_RWCB;
                break;
            case NORMAL_NOCACHE_MEM:
                attr = MMU_MAP_K_RWCB;
                break;
            case DEVICE_MEM:
                attr = MMU_MAP_K_DEVICE;
                break;
            default:
                attr = MMU_MAP_K_DEVICE;
        }

        err = _rt_hw_mmu_map(mmu_info, (void *)mdesc->vaddr_start, (void *)mdesc->paddr_start,
                             mdesc->vaddr_end - mdesc->vaddr_start + 1, attr);
        mdesc++;
    }

    rt_hw_mmu_switch((void *)MMUTable);
}
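
/*
 * Usage sketch (illustrative only): a board support package would typically
 * describe its address map with a mem_desc table and install it once during
 * startup. `kernel_mmu_info`, KERNEL_VADDR_START, PV_OFFSET and UART0_BASE are
 * hypothetical placeholders, not defined in this file.
 *
 *     static rt_mmu_info kernel_mmu_info;
 *
 *     static struct mem_desc platform_mem_desc[] =
 *     {
 *         { .vaddr_start = KERNEL_VADDR_START,
 *           .vaddr_end   = KERNEL_VADDR_START + 0x10000000 - 1,
 *           .paddr_start = KERNEL_VADDR_START + PV_OFFSET,
 *           .attr        = NORMAL_MEM },
 *         { .vaddr_start = UART0_BASE,
 *           .vaddr_end   = UART0_BASE + 0x1000 - 1,
 *           .paddr_start = UART0_BASE,
 *           .attr        = DEVICE_MEM },
 *     };
 *
 *     // after rt_hw_mmu_map_init() has bound kernel_mmu_info to MMUTable:
 *     rt_hw_mmu_setup(&kernel_mmu_info, platform_mem_desc,
 *                     sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
 */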

#endif /* RT_USING_SMART */