/* slab.c — simple slab allocator (object caches with full/partial/free slab lists) */
  1. #include "list.h"
  2. #include "slab.h"
  3. #include "kmem.h"
  4. #include "debug.h"
  5. #include <string.h>
/* Global list of every cache created in the system. */
LIST_HEAD(cache_list);
/* Bootstrap cache: cache descriptors (struct cache_struct) are allocated from it. */
struct cache_struct *cachep;
/* Cache used for off-slab slab descriptors — see kmem_cache_grow's large-object path.
 * NOTE(review): declared as cache_struct* (a cache handle), though the objects it
 * hands out are used as struct slab_struct — confirm intended sizing in init_cache. */
struct cache_struct *slabp;
  9. struct cache_struct *kmem_cache_create(const char *name, intptr_t obj_size, handler_t ctor, handler_t dtor)
  10. {
  11. struct cache_struct *cp;
  12. cp = kmem_cache_alloc(cachep);
  13. KASSERT(cp);
  14. strncpy(cp->name, name, CACHE_NAME_BUF_SIZE-1);
  15. cp->obj_size = obj_size;
  16. cp->page_nr = obj_size * 8 / PG_SIZE + 1;
  17. cp->ctor = ctor;
  18. cp->dtor = dtor;
  19. cp->free_objs = 0;
  20. if (cp->obj_size < (PG_SIZE >> 3))
  21. cp->num = (PG_SIZE - sizeof(struct slab_struct)) / (sizeof(bufctl_t) + cp->obj_size);
  22. else
  23. cp->num = PG_SIZE / cp->obj_size;
  24. cp->total_objs = 0;
  25. INIT_LIST_HEAD(&cp->slab_full);
  26. INIT_LIST_HEAD(&cp->slab_partial);
  27. INIT_LIST_HEAD(&cp->slab_free);
  28. INIT_LIST_HEAD(&cp->next);
  29. list_add_tail(&cp->next, &cache_list);
  30. DEBUG(0, 1, "Cache \"%s\" (size:%d) created\n", name, obj_size);
  31. return cp;
  32. }
  33. static inline bufctl_t *slab_bufctl(struct slab_struct *sp)
  34. {
  35. return (bufctl_t*)(sp+1);
  36. }
  37. static void cache_init_objs(struct cache_struct *cp, struct slab_struct *sp, unsigned long ctor_flags)
  38. {
  39. intptr_t i;
  40. DEBUG(0, 1, " %s:cp->num =%d\n", cp->name, cp->num);
  41. for (i = 0; i < cp->num; i++) {
  42. void *objp = (void*)(sp->s_mem + cp->obj_size*i);
  43. if (cp->ctor)
  44. cp->ctor(objp, cp, ctor_flags);
  45. slab_bufctl(sp)[i] = (bufctl_t)(i+1);
  46. }
  47. slab_bufctl(sp)[i-1] = (bufctl_t)BUFCTL_END;
  48. sp->next_free = 0;
  49. }
/*
 * Grow a cache by one slab: allocate cp->page_nr pages, place (small objects)
 * or allocate (large objects) the slab descriptor, tag the page descriptors,
 * initialize the objects, and put the slab on the cache's free list.
 */
static void kmem_cache_grow(struct cache_struct *cp)
{
	void *addr;
	struct slab_struct *sp;
	struct page_struct *pg_des;
	int i;

	addr = alloc_pages(cp->page_nr);
	KASSERT(addr);
	if (cp->obj_size < (PG_SIZE >> 3)) {
		/* On-slab: descriptor + bufctl array sit at the start of the page(s),
		 * objects begin right after the bufctl array. */
		sp = (struct slab_struct*)addr;
		sp->s_mem = (void*)sp + sizeof(struct slab_struct) + sizeof(bufctl_t)*cp->num;
	}
	else {
		/* Off-slab: descriptor comes from the dedicated 'slabp' cache,
		 * the freshly allocated pages hold only objects. */
		sp = (struct slab_struct*)kmem_cache_alloc(slabp);
		KASSERT(sp);
		sp->s_mem = addr;
	}
	/* Tag every page descriptor so kmem_cache_free can map an object address
	 * back to its cache and slab. */
	pg_des = addr_to_pf_des(addr);
	i = cp->page_nr;
	while (i--) {
		SET_PAGE_CACHE(pg_des, cp);
		SET_PAGE_SLAB(pg_des, sp);
		pg_des++;
	}
	sp->cache = cp;
	sp->ref_cnt = 0;	/* no objects handed out yet */
	cache_init_objs(cp, sp, 0);
	cp->free_objs += cp->num;
	cp->total_objs += cp->num;
	INIT_LIST_HEAD(&sp->list);
	list_add_tail(&sp->list, &cp->slab_free);
	DEBUG(0, 1, " %s:grow up, get %d more free object\n", cp->name, cp->free_objs);
}
/*
 * Pop one object off a slab's bufctl free list and return its address.
 * Also migrates the slab between the cache's lists: free -> partial on the
 * first allocation, partial -> full when the last object is taken.
 * Caller must ensure the slab has at least one free object.
 */
static void *kmem_slab_refill(struct slab_struct *sp)
{
	void *buf_addr;
	intptr_t idx;

	/* Pop the head of the free chain; mark the taken slot as end-of-chain. */
	idx = sp->next_free;
	sp->next_free = (intptr_t)(slab_bufctl(sp)[idx]);
	slab_bufctl(sp)[idx] = BUFCTL_END;
	DEBUG(0, 1, " %s:allocating %uth obj in a slab of Cache \n", sp->cache->name, idx);
	if (sp->next_free == (intptr_t)BUFCTL_END) {
		/* Slab exhausted by this allocation. */
		DEBUG(0, 1, " %s:move from slab_partial to slab_full\n", sp->cache->name);
		list_move(&sp->list, &sp->cache->slab_full);
	}
	else if (sp->ref_cnt == 0) {
		/* First object taken from a previously all-free slab. */
		DEBUG(0, 1, " %s:move from slab_free to slab_partial\n", sp->cache->name);
		list_move(&sp->list, &sp->cache->slab_partial);
	}
	sp->ref_cnt++;
	sp->cache->free_objs--;
	buf_addr = sp->s_mem + idx*sp->cache->obj_size;
	return buf_addr;
}
  104. static void slab_print_list(struct cache_struct *cp)
  105. {
  106. DEBUG(0, 1, " %s:partial[%c], free[%c], full[%c] %3d/%3d (%3d)\n",
  107. cp->name,
  108. list_empty(&cp->slab_partial)?' ':'*',
  109. list_empty(&cp->slab_free)?' ':'*',
  110. list_empty(&cp->slab_full)?' ':'*',
  111. cp->free_objs,
  112. cp->total_objs,
  113. cp->num);
  114. }
  115. void *kmem_cache_alloc(struct cache_struct *cp)
  116. {
  117. struct slab_struct *sp;
  118. slab_print_list(cp);
  119. DEBUG(0, 1, " %s:free_objs = %d/%d\n", cp->name, cp->free_objs, cp->num);
  120. while (!cp->free_objs)
  121. kmem_cache_grow(cp);
  122. list_for_each_entry(sp, &cp->slab_partial, list) {
  123. DEBUG(0, 1, " %s:get a free obj from slab_partial list\n", cp->name);
  124. return kmem_slab_refill(sp);
  125. }
  126. list_for_each_entry(sp, &cp->slab_free, list) {
  127. DEBUG(0, 1, " %s:get a free obj from slab_free list\n", cp->name);
  128. return kmem_slab_refill(sp);
  129. }
  130. KPANIC("%s:failed to alloc a free slab\n", cp->name);
  131. }
/*
 * Return an object to its cache. The owning slab is found through the page
 * descriptor tags set in kmem_cache_grow; the object's slot is pushed back
 * onto the slab's bufctl free chain, and the slab migrates full -> partial
 * or partial -> free as appropriate.
 * NOTE(review): assumes buf_addr was allocated from cp — no cross-check
 * against sp->cache is performed.
 */
void kmem_cache_free(struct cache_struct *cp, void *buf_addr)
{
	struct page_struct *pg;
	struct slab_struct *sp;
	intptr_t idx;

	pg = addr_to_pf_des(buf_addr);
	sp = GET_PAGE_SLAB(pg);
	/* Recover the object index from its offset within the slab's memory. */
	idx = ((intptr_t)(buf_addr - sp->s_mem)) / cp->obj_size;
	/* Push the slot onto the head of the free chain. */
	slab_bufctl(sp)[idx] = (bufctl_t)sp->next_free;
	sp->next_free = idx;
	DEBUG(0, 1, " %s:freeing %uth obj in a slab\n", cp->name, idx);
	/* ref_cnt is inspected BEFORE the decrement below:
	 * == num means the slab was full; == 1 means this free empties it. */
	if (sp->ref_cnt == cp->num) {
		DEBUG(0, 1, " %s:move from slab_full to slab_partial\n", cp->name);
		list_move(&sp->list, &sp->cache->slab_partial);
	}
	else if (sp->ref_cnt == 1) {
		DEBUG(0, 1, " %s:move from slab_partial to slab_free\n", cp->name);
		list_move(&sp->list, &sp->cache->slab_free);
	}
	cp->free_objs++;
	sp->ref_cnt--;
}
/*
 * Destroy a cache. Currently a stub: it only validates the pointer and
 * releases nothing (slabs, descriptor, and list linkage are all leaked).
 * TODO: implement — reap all slabs, unlink from cache_list, free descriptor.
 * NOTE(review): "destory" is a typo for "destroy", but the name is the
 * public interface; renaming would break callers.
 */
void kmem_cache_destory(struct cache_struct *cp)
{
	if (!cp)
		return;
}
  159. void kmem_cache_reap(struct cache_struct *cp)
  160. {
  161. struct slab_struct *sp, *tmp;
  162. slab_print_list(cp);
  163. list_for_each_entry_safe(sp, tmp, &cp->slab_free, list) {
  164. list_del(&sp->list);
  165. if (cp->obj_size < (PG_SIZE >> 3))
  166. free_pages(sp);
  167. else
  168. free_pages(sp->s_mem);
  169. cp->free_objs -= cp->num;
  170. DEBUG(0, 1, " %s:cache shrink, free_objs = %d\n", cp->name, cp->free_objs);
  171. }
  172. }
  173. void init_cache(void)
  174. {
  175. DEBUG(1, 0, "*************************************************\n");
  176. DEBUG(1, 0, "* Initializeing SLAB *\n");
  177. DEBUG(1, 0, "*************************************************\n");
  178. /* Initialize the cachep statically */
  179. cachep = (struct cache_struct*)alloc_page();
  180. KASSERT(cachep);
  181. strncpy(cachep->name, "cachep", CACHE_NAME_BUF_SIZE-1);
  182. cachep->obj_size = sizeof(struct cache_struct);
  183. cachep->page_nr = sizeof(struct cache_struct) * 8 / PG_SIZE + 1;
  184. cachep->ctor = (void*)0;
  185. cachep->dtor = (void*)0;
  186. cachep->num = (PG_SIZE - sizeof(struct slab_struct)) / (sizeof(bufctl_t) + cachep->obj_size);
  187. cachep->free_objs = 0;
  188. cachep->total_objs = 0;
  189. INIT_LIST_HEAD(&cachep->slab_full);
  190. INIT_LIST_HEAD(&cachep->slab_partial);
  191. INIT_LIST_HEAD(&cachep->slab_free);
  192. INIT_LIST_HEAD(&cachep->next);
  193. list_add_tail(&cachep->next, &cache_list);
  194. slabp = kmem_cache_create("slabp", sizeof(struct cache_struct), (void*)0, (void*)0);
  195. #ifdef CONFIG_KMEM_USING_SLAB
  196. init_general_caches();
  197. #endif
  198. }
#define TBL_SIZE 10	/* number of general-purpose size classes */
#define MIN_OBJ_SIZE 8	/* smallest class; each subsequent class doubles */
/* One entry per power-of-two general cache: 8, 16, ... 8*2^(TBL_SIZE-1) bytes. */
struct cache_tbl_entry {
	struct cache_struct *cp;	/* backing cache for this size class */
	intptr_t size;			/* object size served by cp */
};
struct cache_tbl_entry cache_tbl[TBL_SIZE];
  206. void init_general_caches(void)
  207. {
  208. char name_buf[CACHE_NAME_BUF_SIZE];
  209. intptr_t size;
  210. intptr_t i;
  211. size = MIN_OBJ_SIZE;
  212. DEBUG(1, 0, "*************************************************\n");
  213. DEBUG(1, 0, "* Initializeing SLAB(General Object Caches) *\n");
  214. DEBUG(1, 0, "*************************************************\n");
  215. for (i = 0; i < TBL_SIZE; i++) {
  216. sprintf(name_buf, "gen_cache_%d", size);
  217. cache_tbl[i].cp = kmem_cache_create(name_buf, size, (void*)0, (void*)0);
  218. cache_tbl[i].size = size;
  219. size *= 2;
  220. }
  221. }
  222. void *kmem_cache_alloc_gen(intptr_t size)
  223. {
  224. int i;
  225. for (i = 0; i < TBL_SIZE; i++) {
  226. if (cache_tbl[i].size > size)
  227. return kmem_cache_alloc(cache_tbl[i].cp);
  228. }
  229. return (void*)0;
  230. }