cache.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530
  1. #include <string.h>
  2. #include <stdlib.h>
  3. #include <time.h>
  4. #include <pthread.h>
  5. #include <errno.h>
  6. #include <unistd.h>
  7. #include <hal_timer.h>
  8. #include "inter.h"
  9. #define addr_to_blk(nor, addr) ((addr) / nor->blk_size)
  10. #define addr_to_page(nor, addr) ((addr) / nor->page_size)
/*
 * A single write-back cache window over the NOR flash.
 * The window is CACHE_SIZE bytes, aligned to CACHE_SIZE (see
 * set_cache_addr()); 'addr' is its absolute flash base address, or
 * INVALID_CACHE_ADDR when no window is loaded.
 */
typedef struct {
#define CACHE_SIZE SZ_64K /* window size in bytes */
	char *buf; /* CACHE_SIZE bytes of staged data; NULL until init */
#define INVALID_CACHE_ADDR ((unsigned int)-1)
	unsigned int addr; /* flash base of the window, CACHE_SIZE aligned */
} cache_t;
/*
 * Driver-wide cache state: the data window plus two pending-work bitmaps
 * covering that window (one bit per page with staged write data, one bit
 * per erase block with a deferred erase).
 */
struct nor_cache {
	cache_t cache; /* the cached data window */
	unsigned int last_write; /* index of the most recently staged page;
	                          * flushed LAST (lfs metadata ordering) */
	unsigned long *bitmap_page; /* 1 bit per page: staged write pending */
	unsigned int page_cnt; /* pages per window = CACHE_SIZE / page_size */
	unsigned long *bitmap_blk; /* 1 bit per block: erase pending */
	unsigned int blk_cnt; /* blocks per window = CACHE_SIZE / blk_size */
	struct nor_flash *nor; /* underlying flash device */
};
/* Single global instance: this driver caches exactly one NOR device. */
static struct nor_cache g_nor_cache;
/*
 * Serializes every cache operation.
 * NOTE(review): not static — presumably intentional, but confirm no other
 * translation unit externs it before narrowing the linkage.
 */
hal_sem_t lock_nor_cache;
  28. static inline int nor_lock_init(void)
  29. {
  30. lock_nor_cache = hal_sem_create(1);
  31. if (!lock_nor_cache) {
  32. SPINOR_ERR("create hal_sem lock for nor_flash failed\n");
  33. return -1;
  34. }
  35. return 0;
  36. }
  37. static int nor_cache_lock(void)
  38. {
  39. return hal_sem_wait(lock_nor_cache);
  40. }
  41. static int nor_cache_trylock(void)
  42. {
  43. return hal_sem_trywait(lock_nor_cache);
  44. }
  45. static int nor_cache_unlock(void)
  46. {
  47. return hal_sem_post(lock_nor_cache);
  48. }
  49. static void clear_cache(cache_t *c)
  50. {
  51. SPINOR_DEBUG("clear cache addr 0x%x\n", c->addr);
  52. memset(c->buf, 0xFF, CACHE_SIZE);
  53. c->addr = INVALID_CACHE_ADDR;
  54. }
  55. static void delete_cache(cache_t *c)
  56. {
  57. free(c->buf);
  58. c->buf = NULL;
  59. c->addr = 0;
  60. }
  61. static int init_cache(cache_t *c)
  62. {
  63. c->buf = malloc(CACHE_SIZE);
  64. if (!c->buf)
  65. return -ENOMEM;
  66. clear_cache(c);
  67. return 0;
  68. }
  69. static int init_bitmap(struct nor_cache *nc)
  70. {
  71. struct nor_flash *nor = nc->nor;
  72. nc->page_cnt = CACHE_SIZE / nor->page_size;
  73. nc->bitmap_page = malloc(BITS_TO_LONGS(nc->page_cnt) * sizeof(long));
  74. if (!nc->bitmap_page)
  75. return -ENOMEM;
  76. memset(nc->bitmap_page, 0, BITS_TO_LONGS(nc->page_cnt) * sizeof(long));
  77. nc->blk_cnt = CACHE_SIZE / nor->blk_size;
  78. nc->bitmap_blk = malloc(BITS_TO_LONGS(nc->blk_cnt) * sizeof(long));
  79. if (!nc->blk_cnt)
  80. goto free_page;
  81. memset(nc->bitmap_blk, 0, BITS_TO_LONGS(nc->blk_cnt) * sizeof(long));
  82. return 0;
  83. free_page:
  84. free(nc->bitmap_page);
  85. return -ENOMEM;
  86. }
  87. static void delete_bitmap(struct nor_cache *nc)
  88. {
  89. free(nc->bitmap_blk);
  90. nc->bitmap_blk = NULL;
  91. free(nc->bitmap_page);
  92. nc->bitmap_page = NULL;
  93. }
/*
 * nor_cache_init() - one-time setup of the write-back cache for @nor.
 * @nor: flash device description (page/block geometry).
 *
 * Creates the lock, then allocates the window buffer and the bitmaps
 * under it. Returns 0 on success, -EBUSY when already initialized, or a
 * negative errno from the allocation steps.
 *
 * NOTE(review): on a failure after nor_lock_init(), the semaphore is not
 * destroyed; a later retry creates a second one — confirm the hal_sem
 * lifecycle, and whether a destroy call should be added to the error path.
 */
int nor_cache_init(struct nor_flash *nor)
{
	int ret = -ENOMEM;
	struct nor_cache *nc = &g_nor_cache;

	/* initialized before */
	if (nc->cache.buf)
		return -EBUSY;
	ret = nor_lock_init();
	if (ret)
		return ret;
	nor_cache_lock();
	ret = init_cache(&nc->cache);
	if (ret)
		goto unlock;
	nc->nor = nor;
	ret = init_bitmap(nc);
	if (ret)
		goto free_cache;
	nor_cache_unlock();
	return 0;
	/* kept for symmetry with a possible later step after init_bitmap():
	 * free_bitmap:
	 * delete_bitmap(nc);
	 */
free_cache:
	delete_cache(&nc->cache);
unlock:
	nor_cache_unlock();
	return ret;
}
/*
 * nor_cache_exit() - tear down the cache, freeing buffer and bitmaps.
 *
 * NOTE(review): pending data is NOT flushed here — callers presumably
 * must call nor_cache_sync() first, or staged writes/erases are lost.
 * Also runs without taking the lock and never destroys the semaphore
 * created by nor_cache_init() — confirm both are acceptable for the
 * shutdown path.
 */
void nor_cache_exit(void)
{
	struct nor_cache *nc = &g_nor_cache;

	delete_cache(&nc->cache);
	delete_bitmap(nc);
}
  130. static int check_cache_addr(cache_t *c, unsigned int addr, unsigned int len)
  131. {
  132. if (c->addr == INVALID_CACHE_ADDR)
  133. return -1;
  134. /* start boundary */
  135. if (ALIGN_DOWN(addr, CACHE_SIZE) != c->addr)
  136. return -1;
  137. /* end boundary */
  138. if (ALIGN_DOWN(addr + len - 1, CACHE_SIZE) != c->addr)
  139. return -1;
  140. return 0;
  141. }
  142. static inline void set_cache_addr(cache_t *c, unsigned int addr)
  143. {
  144. c->addr = ALIGN_DOWN(addr, CACHE_SIZE);
  145. SPINOR_DEBUG("set cache addr as 0x%x\n", c->addr);
  146. }
  147. static inline unsigned int get_addr_by_page(unsigned int page)
  148. {
  149. struct nor_cache *nc = &g_nor_cache;
  150. struct nor_flash *nor = nc->nor;
  151. cache_t *c = &nc->cache;
  152. return c->addr + page * nor->page_size;
  153. }
  154. static inline unsigned int get_addr_by_blk(unsigned int blk)
  155. {
  156. struct nor_cache *nc = &g_nor_cache;
  157. struct nor_flash *nor = nc->nor;
  158. cache_t *c = &nc->cache;
  159. return c->addr + blk * nor->blk_size;
  160. }
  161. static inline void set_bit(int nr, unsigned long *addr)
  162. {
  163. addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
  164. }
  165. static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
  166. {
  167. return ((1UL << (nr % BITS_PER_LONG)) &
  168. (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
  169. }
/*
 * nor_flush_erase() - issue every erase deferred in bitmap_blk.
 *
 * Walks the block bitmap and coalesces each maximal run of consecutive
 * marked blocks [start, end) into a single nor_erase() call — the whole
 * point of deferring ("lazy") erase is to merge neighbours like this.
 * On success the block bitmap is cleared.
 *
 * Block indices are translated through get_addr_by_blk(), i.e. relative
 * to the current window base; callers (nor_flush_cache) only invoke this
 * with a valid window address.
 * Returns 0 on success or the first nor_erase() error.
 */
static int nor_flush_erase(struct nor_cache *nc)
{
	struct nor_flash *nor = nc->nor;
	unsigned int start, end;
	int ret;

	for (end = start = 0; end < nc->blk_cnt; end++) {
		/* we should do erase lazy to get more continuous erase block */
		if (test_bit(end, nc->bitmap_blk))
			continue; /* marked: run [start, end] keeps growing */
		/* continuous zero, do nothing */
		if (start == end) {
			start = end + 1;
			continue;
		}
		/* clear bit ends a non-empty run: erase [start, end) at once */
		SPINOR_DEBUG("flush erase: addr 0x%x blk cnt %u\n",
			     get_addr_by_blk(start), end - start);
		ret = nor_erase(get_addr_by_blk(start), (end - start) * nor->blk_size);
		if (ret)
			return ret;
		start = end + 1;
	}
	/* a run touching the end of the bitmap is flushed here */
	if (start != end) {
		SPINOR_DEBUG("flush erase: addr 0x%x blk cnt %u\n",
			     get_addr_by_blk(start), end - start);
		ret = nor_erase(get_addr_by_blk(start), (end - start) * nor->blk_size);
		if (ret)
			return ret;
	}
	memset(nc->bitmap_blk, 0, BITS_TO_LONGS(nc->blk_cnt) * sizeof(long));
	return 0;
}
/*
 * nor_flush_write() - write every pending cached page out to flash.
 *
 * Iteration starts at (last_write + 1) and wraps, so the most recently
 * staged page reaches flash LAST. On lfs the final page staged before a
 * sync holds metadata; writing it last guarantees the data it references
 * is already on flash.
 * On success the page bitmap is cleared.
 * Returns 0 on success or the first nor_write() error.
 */
static int nor_flush_write(struct nor_cache *nc)
{
	struct nor_flash *nor = nc->nor;
	cache_t *c = &nc->cache;
	unsigned int page, i;
	char *buf = c->buf;
	int ret;

	for (i = 0; i < nc->page_cnt; i++) {
		/* let start with (last_write page + 1) */
		page = (i + nc->last_write + 1) % nc->page_cnt;
		if (!test_bit(page, nc->bitmap_page))
			continue;
		SPINOR_DEBUG("flush write: addr 0x%x\n", get_addr_by_page(page));
		ret = nor_write(get_addr_by_page(page),
				buf + page * nor->page_size, nor->page_size);
		if (ret)
			return ret;
	}
	memset(nc->bitmap_page, 0, BITS_TO_LONGS(nc->page_cnt) * sizeof(long));
	return 0;
}
  222. static int nor_flush_cache(struct nor_cache *nc)
  223. {
  224. int ret;
  225. if (nc->cache.addr == INVALID_CACHE_ADDR)
  226. return 0;
  227. ret = nor_flush_erase(nc);
  228. if (ret)
  229. return ret;
  230. ret = nor_flush_write(nc);
  231. if (ret)
  232. return ret;
  233. clear_cache(&nc->cache);
  234. return 0;
  235. }
/*
 * nor_cache_write() - stage a page-aligned write in the cache.
 * @addr: absolute flash address; must be page-size aligned.
 * @buf:  source data.
 * @len:  byte count; must be a multiple of the page size.
 *
 * Data is copied into the window buffer and the covered pages marked
 * pending; nothing touches flash until a flush. A request that cannot
 * fit one window (too long, or crossing the window boundary) flushes the
 * current window and is written straight through. A request outside the
 * current window flushes it and re-anchors the window at @addr.
 * Returns 0 on success or a negative errno.
 */
int nor_cache_write(unsigned int addr, char *buf, unsigned int len)
{
	struct nor_cache *nc = &g_nor_cache;
	struct nor_flash *nor = nc->nor;
	cache_t *c = &nc->cache;
	char *pbuf;
	unsigned int page;
	int ret;

	SPINOR_DEBUG("try to write addr 0x%x with size %u\n", addr, len);
	if (addr % nor->page_size) {
		SPINOR_ERR("addr %u must align to page size %u\n", addr, nor->page_size);
		return -EINVAL;
	}
	if (len % nor->page_size) {
		SPINOR_ERR("len %u must align to page size %u\n", len, nor->page_size);
		return -EINVAL;
	}
	ret = nor_cache_lock();
	if (ret)
		return ret;
	/* if over CACHE_SIZE, write roughly */
	if (len > CACHE_SIZE ||
	    (addr - ALIGN_DOWN(addr, CACHE_SIZE) + len > CACHE_SIZE)) {
		ret = nor_flush_cache(nc);
		if (ret)
			goto unlock;
		ret = nor_write(addr, buf, len);
		goto unlock;
	}
	ret = check_cache_addr(c, addr, len);
	if (ret) {
		SPINOR_DEBUG("write addr 0x%x len %d over cache addr 0x%x\n",
			     addr, len, c->addr);
		/* if write to a new addr out of cache, just flush cache for new one */
		ret = nor_flush_cache(nc);
		if (ret)
			goto unlock;
		/* ready new cache for data */
		set_cache_addr(c, addr);
	}
	/* copy page by page into the window, marking each page pending */
	pbuf = c->buf + (addr - c->addr);
	page = addr_to_page(nor, addr - c->addr);
	while (len) {
		unsigned int size = min(len, nor->page_size);
		memcpy(pbuf, buf, size);
		SPINOR_DEBUG("write: mark page %d abs addr 0x%x\n", page,
			     get_addr_by_page(page));
		set_bit(page, nc->bitmap_page);
		/*
		 * The order of page to flush-write is very pivotal. On lfs,
		 * the last page before sync always meta data. We must ensure normal
		 * data to write to flash before meta data. So, we must save the
		 * last page and flush this page at last.
		 */
		nc->last_write = page;
		page += 1;
		pbuf += size;
		buf += size;
		len -= size;
	}
	ret = 0;
unlock:
	nor_cache_unlock();
	return ret;
}
  301. int nor_cache_read(unsigned int addr, char *buf, unsigned int len)
  302. {
  303. struct nor_cache *nc = &g_nor_cache;
  304. struct nor_flash *nor = nc->nor;
  305. cache_t *c = &nc->cache;
  306. char *pbuf;
  307. unsigned int page, blk;
  308. int ret;
  309. SPINOR_DEBUG("try to read addr 0x%x with size %u\n", addr, len);
  310. if (addr % nor->page_size) {
  311. SPINOR_ERR("addr %u must align to page size %u\n", addr, nor->page_size);
  312. return -EINVAL;
  313. }
  314. if (len % nor->page_size) {
  315. SPINOR_ERR("len %u must align to page size %u\n", len, nor->page_size);
  316. return -EINVAL;
  317. }
  318. ret = nor_cache_lock();
  319. if (ret)
  320. return ret;
  321. /* if over CACHE_SIZE, write roughly */
  322. if (len > CACHE_SIZE ||
  323. (addr - ALIGN_DOWN(addr, CACHE_SIZE) + len > CACHE_SIZE)) {
  324. ret = nor_flush_cache(nc);
  325. if (ret)
  326. goto unlock;
  327. ret = nor_read(addr, buf, len);
  328. goto unlock;
  329. }
  330. ret = check_cache_addr(c, addr, len);
  331. if (ret) {
  332. ret = nor_read(addr, buf, len);
  333. goto unlock;
  334. }
  335. pbuf = c->buf + (addr - c->addr);
  336. page = addr_to_page(nor, addr - c->addr);
  337. while (len) {
  338. unsigned int size = min(len, nor->page_size);
  339. if (test_bit(page, nc->bitmap_page)) {
  340. SPINOR_DEBUG("read match cache page %d addr 0x%x\n", page,
  341. get_addr_by_page(page));
  342. memcpy(buf, pbuf, size);
  343. } else {
  344. blk = addr_to_blk(nor, addr - c->addr);
  345. if (test_bit(blk, nc->bitmap_blk)) {
  346. SPINOR_DEBUG("read match cache erase blk 0x%x addr 0x%x\n",
  347. blk, get_addr_by_page(page));
  348. memset(buf, 0xFF, size);
  349. } else {
  350. SPINOR_DEBUG("read not match cache addr 0x%x\n",
  351. get_addr_by_page(page));
  352. ret = nor_read(get_addr_by_page(page), buf, size);
  353. if (ret)
  354. goto unlock;
  355. }
  356. }
  357. page += 1;
  358. pbuf += size;
  359. buf += size;
  360. len -= size;
  361. }
  362. ret = 0;
  363. unlock:
  364. nor_cache_unlock();
  365. return ret;
  366. }
  367. int nor_cache_sync(void)
  368. {
  369. struct nor_cache *nc = &g_nor_cache;
  370. int ret;
  371. SPINOR_DEBUG("try to sync nor cache\n");
  372. ret = nor_cache_trylock();
  373. if (ret) {
  374. SPINOR_DEBUG("trylock fail, skip sync nor cache\n");
  375. return ret;
  376. }
  377. ret = nor_flush_cache(nc);
  378. nor_cache_unlock();
  379. return ret? -1 : 0;
  380. }
/*
 * nor_cache_erase() - stage a block-aligned erase in the cache.
 * @addr: absolute flash address; must be block-size aligned.
 * @len:  byte count; must be a multiple of the block size.
 *
 * Marks the covered blocks erase-pending in the bitmap instead of
 * erasing immediately; the flush path merges adjacent blocks into fewer
 * nor_erase() calls. Ranges that do not fit the window are erased
 * directly (after flushing pending erases only).
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): marking a block erased does not clear page bits already
 * staged inside that block, so flush will erase and then rewrite the old
 * page data — presumably fine for lfs (erase-before-write), but confirm
 * for other callers.
 */
int nor_cache_erase(unsigned int addr, unsigned int len)
{
	struct nor_cache *nc = &g_nor_cache;
	struct nor_flash *nor = nc->nor;
	cache_t *c = &nc->cache;
	unsigned int blk;
	int ret;

	SPINOR_DEBUG("try to erase addr 0x%x with size %u\n", addr, len);
	if (addr % nor->blk_size) {
		SPINOR_ERR("addr %u must align to blk size %u\n", addr, nor->blk_size);
		return -EINVAL;
	}
	if (len % nor->blk_size) {
		SPINOR_ERR("len %u must align to blk size %u\n", len, nor->blk_size);
		return -EINVAL;
	}
	ret = nor_cache_lock();
	if (ret)
		return ret;
	/* if over CACHE_SIZE, erase roughly */
	if (len > CACHE_SIZE ||
	    (addr - ALIGN_DOWN(addr, CACHE_SIZE) + len > CACHE_SIZE)) {
		/* just flush erase operation */
		ret = nor_flush_erase(nc);
		if (ret)
			goto unlock;
		ret = nor_erase(addr, len);
		goto unlock;
	}
	ret = check_cache_addr(c, addr, len);
	if (ret) {
		SPINOR_DEBUG("erase addr 0x%x len %d over cache addr 0x%x\n",
			     addr, len, c->addr);
		/* if erase to a new addr out of cache, just flush cache for new one */
		ret = nor_flush_cache(nc);
		if (ret)
			goto unlock;
		/* ready new cache for data */
		set_cache_addr(c, addr);
	}
	/* mark each covered block erase-pending; actual erase is deferred */
	blk = addr_to_blk(nor, addr - c->addr);
	for (; len; len -= nor->blk_size, blk++) {
		SPINOR_DEBUG("erase: mark blk %d addr 0x%x\n", blk, get_addr_by_blk(blk));
		set_bit(blk, nc->bitmap_blk);
	}
	ret = 0;
unlock:
	nor_cache_unlock();
	return ret;
}