/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-03-20     WangXiaoyao  Complete testcase for mm_aspace.c
 */
#ifndef __TEST_ASPACE_API_H__
#define __TEST_ASPACE_API_H__

#include "common.h"
#include "mm_aspace.h"
#include "mm_flag.h"
#include "test_aspace_api_internal.h"
#include "test_synchronization.h"

/**
 * @brief API for aspace create/destroy
 *
 * rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);
 * rt_aspace_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl);
 * void rt_aspace_delete(rt_aspace_t aspace);
 * void rt_aspace_detach(rt_aspace_t aspace);
 *
 * init & detach are covered by the create & delete tests, since create wraps
 * init and delete wraps detach
 */
static void aspace_create_tc(void)
{
    /* test robustness: detect the failure and recover the state of the overall system */
    rt_aspace_t aspace;

    CONSIST_HEAP(aspace = rt_aspace_create((void *)(0 - 0x1000), 0x1000, NULL));
    uassert_true(!aspace);
}
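
/*
 * For reference, a minimal sketch of the happy-path lifecycle that the
 * robustness check above guards: a create on a valid range followed by a
 * delete. It reuses only calls that appear elsewhere in this file; the
 * function name is illustrative and the block is compiled out deliberately,
 * since it is a sketch rather than a test unit.
 */
#if 0
static void aspace_lifecycle_sketch(void)
{
    void *pgtbl = rt_pages_alloc(2); /* 2^2 pages, as aspace_delete_tc() uses below */
    rt_aspace_t aspace = rt_aspace_create((void *)USER_VADDR_START,
                                          USER_VADDR_TOP - USER_VADDR_START,
                                          pgtbl);
    if (aspace)
        rt_aspace_delete(aspace); /* recycles every varea mapped inside */
    rt_pages_free(pgtbl, 2);      /* the page table is owned by the caller */
}
#endif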

#if 1 /* make it clear to identify the block :) */
/* for testing on _aspace_traverse */
static void *_prev_end;
static size_t _count;
static int _test_increase(rt_varea_t varea, void *param)
{
    /* vareas must be visited in ascending, non-overlapping address order */
    uassert_true(varea->start >= _prev_end);
    _prev_end = varea->start + varea->size;
    _count += 1;
    return 0;
}
#endif

static void aspace_delete_tc(void)
{
    /**
     * @brief Requirement: delete should properly recycle every type of varea
     * inside the aspace and release the resources allocated for it
     */
    rt_aspace_t aspace;
    struct rt_mm_va_hint hint = {.flags = 0,
                                 .map_size = 0x1000,
                                 .prefer = 0};
    struct rt_varea varea_phy;
    struct rt_varea varea_mobj;
    void *pgtbl;
    void *vaddr;

    /* compatible with armv7a */
    pgtbl = rt_pages_alloc(2);
    uassert_true(!!pgtbl); /* page must be usable */
    rt_memset(pgtbl, 0, ARCH_PAGE_SIZE);

    CONSIST_HEAP({
        aspace = rt_aspace_create((void *)USER_VADDR_START, USER_VADDR_TOP - USER_VADDR_START, pgtbl);
        uassert_true(!!aspace);

        /* insert 4 types of vareas into this aspace */
        hint.limit_start = aspace->start;
        hint.limit_range_size = aspace->size;
        uassert_true(!rt_aspace_map_phy(aspace, &hint, MMU_MAP_K_RWCB, 0, &vaddr));
        uassert_true(!rt_aspace_map_phy_static(aspace, &varea_phy, &hint, MMU_MAP_K_RWCB, 0, &vaddr));
        uassert_true(!rt_aspace_map(aspace, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
        uassert_true(!rt_aspace_map_static(aspace, &varea_mobj, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));

        /* for testing on _aspace_traverse */
        _count = 0;
        _prev_end = 0;
        uassert_true(!rt_aspace_traversal(aspace, _test_increase, 0));
        /* ensure the mapping is done */
        uassert_true(_count == 4);

        rt_aspace_delete(aspace);

        uassert_true(rt_pages_free(pgtbl, 2) == 1); /* page free must succeed */
    });
}

/**
 * @brief Memory map on a virtual address space to a mappable object
 *
 * int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length, rt_size_t attr,
 *                   mm_flag_t flags, rt_mem_obj_t mem_obj, rt_size_t offset);
 * int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
 *                          rt_size_t length, rt_size_t attr, mm_flag_t flags,
 *                          rt_mem_obj_t mem_obj, rt_size_t offset);
 */
static void aspace_map_tc(void)
{
    /**
     * @brief Requirement:
     * robustness, filter out invalid input
     */
    void *vaddr = RT_NULL;
    uassert_true(rt_aspace_map(0, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
    uassert_true(vaddr == RT_NULL);

    vaddr = (void *)USER_VADDR_START;
    uassert_true(rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
    uassert_true(vaddr == RT_NULL);

    uassert_true(rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, -1, &rt_mm_dummy_mapper, 0));
    uassert_true(vaddr == RT_NULL);

    /**
     * @brief Requirement:
     * in _rt_aspace_map:_varea_install
     * do not cover an existing varea if a named mapping is mandatory
     */
    // vaddr = (void *)((rt_ubase_t)aspace_map_tc & ~ARCH_PAGE_MASK);
    // CONSIST_HEAP(
    //     uassert_true(
    //         rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0)));
    // uassert_true(vaddr == RT_NULL);

    /**
     * @brief Requirement:
     * in _rt_aspace_map:_varea_install:_find_free
     * verify that this routine can choose a free region of the specified size
     * that also satisfies a specified alignment requirement
     */
#define ALIGN_REQ (0x04000000)
    CONSIST_HEAP({
        uassert_true(!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_CREATE(0, ALIGN_REQ), &rt_mm_dummy_mapper, 0));
        uassert_true(!((rt_ubase_t)vaddr & (ALIGN_REQ - 1)));
        rt_aspace_unmap(&rt_kernel_space, vaddr);
    });

    /* test the internal APIs */
    test_find_free();
}
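
/*
 * For reference, a minimal sketch of the success path that aspace_map_tc()
 * exercises: pass RT_NULL so that _find_free picks an address honoring an
 * alignment request encoded with MMF_CREATE(). The function name is
 * illustrative, the 0x2000 (8 KB) alignment is an arbitrary example value,
 * and the block is compiled out deliberately.
 */
#if 0
static void aspace_map_usage_sketch(void)
{
    void *vaddr = RT_NULL; /* RT_NULL asks the allocator to choose the address */
    if (!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB,
                       MMF_CREATE(0, 0x2000), &rt_mm_dummy_mapper, 0))
    {
        /* ... the region [vaddr, vaddr + 0x1000) is now mapped ... */
        rt_aspace_unmap(&rt_kernel_space, vaddr);
    }
}
#endif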

/**
 * @brief Page frames mapping to a varea
 * completes the page table on the specified varea and handles TLB maintenance
 * There are 2 variants of this API:
 *
 * int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
 * int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length);
 */
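
/*
 * Note on the assertions below: rt_pages_alloc() returns a kernel virtual
 * address, and the tests derive the physical address they expect from
 * rt_kmem_v2p() by adding PV_OFFSET, the kernel's fixed physical-virtual
 * offset for directly mapped pages (paddr == vaddr + PV_OFFSET).
 */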

static rt_varea_t _create_varea(const size_t size)
{
    rt_varea_t varea;
    void *vaddr = rt_ioremap_start;

    varea = rt_malloc(sizeof(*varea));
    uassert_true(!!varea);
    uassert_true(!rt_aspace_map_static(&rt_kernel_space, varea, &vaddr, size, MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0));
    /* the varea is heap-allocated here, so drop the static flag and let unmap free it */
    varea->flag &= ~MMF_STATIC_ALLOC;
    uassert_true(!!vaddr);
    return varea;
}

static void test_varea_map_page(void)
{
    /**
     * @brief rt_varea_map_page
     * Requirements: complete the page table entry
     */
    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
    rt_varea_t varea = _create_varea(buf_sz);
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        void *page = rt_pages_alloc(0);
        uassert_true(!!page);
        uassert_true(!rt_varea_map_page(varea, varea->start + i, page));
        uassert_true(rt_kmem_v2p(varea->start + i) == (page + PV_OFFSET));

        /* let the page manager handle the freeing of the page */
        rt_varea_pgmgr_insert(varea, page);
        uassert_true(rt_kmem_v2p(varea->start + i) == (page + PV_OFFSET));
    }

    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
}

static void test_varea_map_range(void)
{
    /**
     * @brief rt_varea_map_range
     * Requirements: complete the page table entry
     */
    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
    rt_varea_t varea = _create_varea(buf_sz);
    void *page = rt_pages_alloc(rt_page_bits(buf_sz));
    uassert_true(!!page);
    uassert_true(!rt_varea_map_range(varea, varea->start, page + PV_OFFSET, buf_sz));
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        uassert_true(rt_kmem_v2p(varea->start + i) == (page + i + PV_OFFSET));
    }

    uassert_true(rt_pages_free(page, rt_page_bits(buf_sz)));
    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
}
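
/*
 * Ownership differs between the two tests above: in test_varea_map_page()
 * each page is handed to the page manager via rt_varea_pgmgr_insert(), so it
 * is released when the varea is unmapped; in test_varea_map_range() the
 * caller keeps ownership of the contiguous block and must call
 * rt_pages_free() itself.
 */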

/**
 * @brief rt_varea_unmap_page
 * Requirements: cancel the page table entry
 */
static void test_varea_unmap_page(void)
{
    /* prepare the environment */
    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
    rt_varea_t varea = _create_varea(buf_sz);
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        void *page = rt_pages_alloc(0);
        uassert_true(!!page);
        uassert_true(!rt_varea_map_page(varea, varea->start + i, page));

        /* let the page manager handle the freeing of the page */
        rt_varea_pgmgr_insert(varea, page);
        uassert_true(rt_kmem_v2p(varea->start + i) == (page + PV_OFFSET));
    }

    /* test whether the unmap succeeds */
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        uassert_true(rt_varea_unmap_page(varea, varea->start + i) == RT_EOK);
        uassert_true(rt_kmem_v2p(varea->start + i) == ARCH_MAP_FAILED);
    }

    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
}

/**
 * @brief rt_varea_unmap_range
 * Requirements: cancel the page table entry
 */
static void test_varea_unmap_range(void)
{
    const size_t buf_sz = 4 * ARCH_PAGE_SIZE;
    rt_varea_t varea = _create_varea(buf_sz);
    void *page = rt_pages_alloc(rt_page_bits(buf_sz));
    uassert_true(!!page);
    uassert_true(!rt_varea_map_range(varea, varea->start, page + PV_OFFSET, buf_sz));
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        uassert_true(rt_kmem_v2p(varea->start + i) == (page + i + PV_OFFSET));
    }

    /* test whether the unmap succeeds */
    uassert_true(rt_varea_unmap_range(varea, varea->start, buf_sz) == RT_EOK);
    for (size_t i = 0; i < buf_sz; i += ARCH_PAGE_SIZE)
    {
        uassert_true(rt_kmem_v2p(varea->start + i) == ARCH_MAP_FAILED);
    }

    uassert_true(rt_pages_free(page, rt_page_bits(buf_sz)));
    uassert_true(!rt_aspace_unmap(&rt_kernel_space, varea->start));
}

static void varea_map_tc(void)
{
    CONSIST_HEAP(test_varea_map_page());
    CONSIST_HEAP(test_varea_map_range());
    CONSIST_HEAP(test_varea_unmap_page());
    CONSIST_HEAP(test_varea_unmap_range());
}

static void aspace_traversal_tc(void)
{
    /**
     * @brief Requirement
     * Iterate over each varea in an aspace; exercised through aspace_delete_tc,
     * whose traversal counts the 4 vareas it inserts
     */
    CONSIST_HEAP(aspace_delete_tc());
    uassert_true(4 == _count);
}
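
/*
 * For reference, a sketch of driving rt_aspace_traversal() directly with a
 * custom callback of the same shape as _test_increase(). The helper names
 * are illustrative, and the block is compiled out deliberately, as it is an
 * illustration rather than a test unit.
 */
#if 0
static int _count_one(rt_varea_t varea, void *param)
{
    size_t *counter = param;
    *counter += 1;
    return 0;
}

static void traversal_usage_sketch(void)
{
    size_t n = 0;
    rt_aspace_traversal(&rt_kernel_space, _count_one, &n);
    /* n now holds the number of vareas currently in the kernel space */
}
#endif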

#ifdef ARCH_ARMV8
static void aspace_control_tc(void)
{
    /* this case is designed for one page size only */
    const size_t buf_sz = ARCH_PAGE_SIZE;
    void *vaddr = RT_NULL;
    volatile char *remap_nocache;
    int platform_cache_probe;
    uassert_true(!rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000, MMU_MAP_K_RWCB, MMF_PREFETCH, &rt_mm_dummy_mapper, 0));
    uassert_true(!!vaddr);

    /* map a non-cacheable alias of the region to verify the cache */
    remap_nocache = rt_ioremap(rt_kmem_v2p(vaddr), buf_sz);
    uassert_true(!!remap_nocache);

    /* pre-probing */
    rt_memset(vaddr, 0xba, buf_sz);
    /* no need to sync the transaction on the same core */
    platform_cache_probe = memtest(remap_nocache, 0xab, buf_sz);

    if (!platform_cache_probe)
    {
        LOG_I("Cannot distinguish cache attribution on current platform");
    }
    else
    {
        LOG_I("Ready to verify attribution of cached & non-cacheable");
    }

    /* verify the cache */
    uassert_true(!rt_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_NONCACHE));
    rt_memset(vaddr, 0, buf_sz);
    uassert_true(!memtest(remap_nocache, 0, buf_sz));

    /* the other option is MMU_CNTL_CACHE */
    uassert_true(!rt_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_CACHE));

    rt_iounmap(remap_nocache);
    uassert_true(!rt_aspace_unmap(&rt_kernel_space, vaddr));
}
#endif

static void aspace_tc(void)
{
    UTEST_UNIT_RUN(aspace_create_tc);
    UTEST_UNIT_RUN(aspace_delete_tc);
    UTEST_UNIT_RUN(aspace_map_tc);
    UTEST_UNIT_RUN(aspace_traversal_tc);
#ifdef ARCH_ARMV8
    UTEST_UNIT_RUN(aspace_control_tc);
#endif
    UTEST_UNIT_RUN(varea_map_tc);
    /* functionality */
    UTEST_UNIT_RUN(synchronization_tc);
    return;
}

#endif /* __TEST_ASPACE_API_H__ */