/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-03-17     WangXiaoyao  cache API unit test
 */
#ifndef __TEST_CACHE_RV64_H
#define __TEST_CACHE_RV64_H

#ifdef ARCH_RISCV64
#include "riscv_mmu.h"
#include <utest.h>
#include <cache.h>
#include <page.h>
#include <mmu.h>
#include <ioremap.h>
/**
 * ==============================================================
 * TEST FEATURE
 * API under cache.h
 * rt_hw_sync_cache_local
 *
 * rt_hw_cpu_dcache_clean
 * rt_hw_cpu_dcache_invalidate
 * rt_hw_cpu_dcache_clean_invalidate
 * rt_hw_cpu_dcache_clean_all
 * rt_hw_cpu_dcache_invalidate_all // meaningless
 * rt_hw_cpu_dcache_clean_invalidate_all
 * rt_hw_cpu_icache_invalidate
 * rt_hw_cpu_icache_invalidate_all
 * ==============================================================
 */
  35. /* Ensure the ISA is valid for target ARCHITECTURE */
  36. static void _illegal_instr(void)
  37. {
  38. rt_hw_sync_cache_local(_illegal_instr, 64);
  39. rt_hw_cpu_dcache_clean(_illegal_instr, 64);
  40. rt_hw_cpu_dcache_invalidate(_illegal_instr, 64);
  41. // rt_hw_cpu_dcache_clean_invalidate(_illegal_instr, 64); // C908 ONLY
  42. rt_hw_cpu_dcache_clean_all();
  43. rt_hw_cpu_dcache_invalidate_all(); // !CAREFUL must be inline
  44. // rt_hw_cpu_dcache_clean_invalidate_all(); // C908 ONLY
  45. rt_hw_cpu_icache_invalidate(_illegal_instr, 64);
  46. rt_hw_cpu_icache_invalidate_all();
  47. uassert_true(1);
  48. LOG_I("All ok!");
  49. }
  50. static int _get1(void)
  51. {
  52. return 1;
  53. }
  54. static int _get2(void)
  55. {
  56. return 2;
  57. }
  58. /* hot patching codes and test if the value can be seen by icache */
  59. static void _test_cache_sync(void)
  60. {
  61. uassert_true(1 == _get1());
  62. rt_memcpy(_get1, _get2, _get2 - _get1);
  63. uassert_true(1 == _get1());
  64. rt_hw_sync_cache_local(_get1, _get2 - _get1);
  65. uassert_true(2 == _get1());
  66. LOG_I("%s ok", __func__);
  67. }
  68. /* test clean operation should do and only effect the range specified by writing to a page */
  69. static void _test_dcache_clean(void)
  70. {
  71. const size_t padding = 1024 * 3;
  72. const size_t buf_sz = ARCH_PAGE_SIZE * 2;
  73. char *page = rt_pages_alloc(rt_page_bits(buf_sz));
  74. uassert_true(!!page);
  75. /* after ioremap, we can access system memory to verify outcome */
  76. volatile char *remap_nocache = rt_ioremap(page + PV_OFFSET, buf_sz);
  77. rt_memset(page, 0xab, buf_sz);
  78. rt_hw_cpu_sync();
  79. int _outdate_flag = 0;
  80. for (size_t i = padding; i < ARCH_PAGE_SIZE; i++)
  81. {
  82. if (remap_nocache[i] != 0xab)
  83. {
  84. _outdate_flag = 1;
  85. break;
  86. }
  87. }
  88. page[padding - 1] = 0xac;
  89. page[padding + ARCH_PAGE_SIZE] = 0xac;
  90. rt_hw_cpu_dcache_clean(page + padding, ARCH_PAGE_SIZE);
  91. /* free some space in dcache to avoid padding data being written back */
  92. rt_hw_cpu_dcache_invalidate(page + padding, ARCH_PAGE_SIZE);
  93. uassert_true(remap_nocache[padding - 1] != 0xac);
  94. uassert_true(remap_nocache[padding + ARCH_PAGE_SIZE] != 0xac);
  95. int _test_ok = 1;
  96. for (size_t i = padding; i < ARCH_PAGE_SIZE; i++)
  97. {
  98. if (remap_nocache[i] != 0xab)
  99. {
  100. _test_ok = 0;
  101. break;
  102. }
  103. }
  104. uassert_true(_test_ok);
  105. if (!_outdate_flag)
  106. LOG_W("Cannot guarantee clean works");
  107. else
  108. LOG_I("%s ok", __func__);
  109. rt_pages_free(page, 0);
  110. rt_iounmap(remap_nocache);
  111. }
  112. /* test clean op should do and only effect the range */
  113. static void _test_dcache_invalidate(void)
  114. {
  115. const size_t padding = 1024 * 3;
  116. const size_t buf_sz = ARCH_PAGE_SIZE * 2;
  117. /* prepare */
  118. char *page = rt_pages_alloc(rt_page_bits(buf_sz));
  119. uassert_true(!!page);
  120. volatile char *remap_nocache = rt_ioremap(page + PV_OFFSET, buf_sz);
  121. rt_memset(page, 0x0, buf_sz);
  122. rt_hw_cpu_sync();
  123. int _outdate_flag = 0;
  124. for (size_t i = padding; i < ARCH_PAGE_SIZE; i++)
  125. {
  126. remap_nocache[i] = 0xab;
  127. rt_hw_cpu_dcache_invalidate((void *)&remap_nocache[i], 1);
  128. }
  129. rt_hw_cpu_dcache_clean_all();
  130. int _test_ok = 1;
  131. for (size_t i = padding; i < ARCH_PAGE_SIZE; i++)
  132. {
  133. if (remap_nocache[i] == 0xab)
  134. {
  135. _test_ok = 0;
  136. break;
  137. }
  138. }
  139. uassert_true(_test_ok);
  140. LOG_I("%s ok", __func__);
  141. rt_pages_free(page, 0);
  142. rt_iounmap(remap_nocache);
  143. }
  144. static rt_err_t utest_tc_init(void)
  145. {
  146. return RT_EOK;
  147. }
  148. static rt_err_t utest_tc_cleanup(void)
  149. {
  150. return RT_EOK;
  151. }
  152. static void testcase(void)
  153. {
  154. UTEST_UNIT_RUN(_illegal_instr);
  155. #ifdef BOARD_allwinnerd1s
  156. /* thead ISA extension */
  157. UTEST_UNIT_RUN(_test_cache_sync);
  158. /* part of it is hard to test on simulation machine */
  159. UTEST_UNIT_RUN(_test_dcache_clean);
  160. UTEST_UNIT_RUN(_test_dcache_invalidate);
  161. #endif
  162. }
  163. UTEST_TC_EXPORT(testcase, "testcases.libcpu.cache", utest_tc_init, utest_tc_cleanup, 10);
#endif /* ARCH_RISCV64 */
#endif /* __TEST_CACHE_RV64_H */