/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-24     GuEe-GUI     the first version
 */
#include <rtthread.h>
#include <rtdevice.h>

#define DBG_TAG "rtdm.numa"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <drivers/pic.h>
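
/*
 * One physical memory range owned by a NUMA node, parsed from the
 * devicetree "memory" nodes and linked on numa_memory_nodes.
 */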
struct numa_memory
{
    rt_list_t list;
    int nid;
    rt_uint64_t start;
    rt_uint64_t end;
    union
    {
        void *ofw_node;
    };
};
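
/*
 * cpu_numa_map[cpuid] holds each CPU's NUMA node id (-RT_ENOSYS when
 * unknown). Both objects live in ".bss.noclean.numa", a section that, by
 * its name, is presumably excluded from the normal BSS clear.
 */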
static rt_bool_t numa_enabled = RT_FALSE;
static int cpu_numa_map[RT_CPUS_NR] rt_section(".bss.noclean.numa");
static rt_list_t numa_memory_nodes rt_section(".bss.noclean.numa");
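
/* Return the NUMA node id of a CPU, or a negative error code when NUMA is
 * disabled or the CPU id is out of range. */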
int rt_numa_cpu_id(int cpuid)
{
    if (!numa_enabled)
    {
        return -RT_ENOSYS;
    }

    return cpuid < RT_ARRAY_SIZE(cpu_numa_map) ? cpu_numa_map[cpuid] : -RT_EINVAL;
}
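
/* Return the NUMA node id of a device, read from its "numa-node-id"
 * property, or a negative error code on failure. */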
int rt_numa_device_id(struct rt_device *dev)
{
    rt_uint32_t nid = (rt_uint32_t)-RT_ENOSYS;

    if (!numa_enabled)
    {
        return (int)nid;
    }

    /* Return the read error on failure, otherwise the node id (GNU "?:") */
    return rt_dm_dev_prop_read_u32(dev, "numa-node-id", &nid) ? : (int)nid;
}
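
/* Fill out_affinity with every CPU on the NUMA node that owns phy_addr;
 * fall back to CPU#0 when NUMA is disabled. */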
rt_err_t rt_numa_memory_affinity(rt_uint64_t phy_addr, rt_bitmap_t *out_affinity)
{
    struct numa_memory *nm;

    if (!out_affinity)
    {
        return -RT_EINVAL;
    }

    if (!numa_enabled)
    {
        /* Default to CPU#0 */
        RT_IRQ_AFFINITY_SET(out_affinity, 0);

        return RT_EOK;
    }

    rt_memset(out_affinity, 0, sizeof(*out_affinity) * RT_BITMAP_LEN(RT_CPUS_NR));

    rt_list_for_each_entry(nm, &numa_memory_nodes, list)
    {
        if (phy_addr >= nm->start && phy_addr < nm->end)
        {
            for (int i = 0; i < RT_ARRAY_SIZE(cpu_numa_map); ++i)
            {
                if (cpu_numa_map[i] == nm->nid)
                {
                    RT_IRQ_AFFINITY_SET(out_affinity, i);
                }
            }

            return RT_EOK;
        }
    }

    return -RT_EEMPTY;
}

#ifdef RT_USING_OFW
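/*
 * Parse NUMA topology from the devicetree: only enabled when "numa=on"
 * appears in the bootargs; records each CPU's "numa-node-id" and every
 * memory range that carries one.
 */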
static int numa_ofw_init(void)
{
    int i = 0;
    rt_uint32_t nid;
    const char *numa_conf;
    struct rt_ofw_node *np = RT_NULL;

    numa_conf = rt_ofw_bootargs_select("numa=", 0);

    if (!numa_conf || rt_strcmp(numa_conf, "on"))
    {
        return (int)RT_EOK;
    }

    numa_enabled = RT_TRUE;

    for (int i = 0; i < RT_ARRAY_SIZE(cpu_numa_map); ++i)
    {
        cpu_numa_map[i] = -RT_ENOSYS;
    }

    rt_list_init(&numa_memory_nodes);

    rt_ofw_foreach_cpu_node(np)
    {
        rt_ofw_prop_read_u32(np, "numa-node-id", (rt_uint32_t *)&cpu_numa_map[i]);

        if (++i >= RT_CPUS_NR)
        {
            break;
        }
    }

    rt_ofw_foreach_node_by_type(np, "memory")
    {
        if (!rt_ofw_prop_read_u32(np, "numa-node-id", &nid))
        {
            int mem_nr = rt_ofw_get_address_count(np);

            for (i = 0; i < mem_nr; ++i)
            {
                rt_uint64_t addr, size;
                struct numa_memory *nm;

                if (rt_ofw_get_address(np, i, &addr, &size))
                {
                    continue;
                }

                nm = rt_malloc(sizeof(*nm));

                if (!nm)
                {
                    LOG_E("No memory to record NUMA[%d] memory[%p, %p] info",
                            nid, addr, addr + size);
                    return (int)-RT_ENOMEM;
                }

                nm->start = addr;
                nm->end = addr + size;
                /* Tie this range to its node so rt_numa_memory_affinity() can match it */
                nm->nid = (int)nid;
                nm->ofw_node = np;

                rt_list_init(&nm->list);
                rt_list_insert_before(&numa_memory_nodes, &nm->list);
            }
        }
    }

    return 0;
}
INIT_CORE_EXPORT(numa_ofw_init);
#endif /* RT_USING_OFW */