  1. /*
  2. +------------------------------------------------------------------------------
  3. | Project : Device Filesystem
  4. +------------------------------------------------------------------------------
  5. | Copyright 2004 - 2009 www.rt-thread.org
  6. | All rights reserved.
  7. |------------------------------------------------------------------------------
  8. | File : dfs_cache.c, the LUT disk cache implementation
  9. |------------------------------------------------------------------------------
  10. | Chang Logs:
  11. | Date Author Notes
  12. | 2009-04-26 bernard The first version.
  13. +------------------------------------------------------------------------------
  14. */
  15. #include "dfs_cache.h"
  16. #define ioman_isReqRo(mode) ((mode)&(IOM_MODE_READONLY))
  17. #define ioman_isReqRw(mode) ((mode)&(IOM_MODE_READWRITE))
  18. #define ioman_isReqExp(mode) ((mode)&(IOM_MODE_EXP_REQ))
  19. rt_err_t ioman_init(IOManager* ioman)
  20. {
  21. register rt_uint32_t index;
  22. RT_ASSERT(ioman != RT_NULL);
  23. ioman->numbuf = DFS_CACHE_MAX_NUM;
  24. rt_memset(ioman->sector, 0,sizeof(ioman->sector));
  25. rt_memset(ioman->status, 0,sizeof(ioman->status));
  26. rt_memset(ioman->usage, 0, sizeof(ioman->usage));
  27. rt_memset(ioman->ring_fifo, 0, sizeof(ioman->ring_fifo));
  28. rt_memset(ioman->cache, 0, sizeof(ioman->cache));
  29. /* init fifo */
  30. for (index = 0; index < ioman->numbuf; index ++)
  31. {
  32. ioman->ring_fifo[index] = ioman->numbuf - index - 1;
  33. }
  34. return RT_EOK;
  35. }
  36. /*
  37. * get the last fifo item and put it to the top of fifo ring
  38. */
  39. static rt_uint8_t ioman_ring_fifo_swap(IOManager *ioman)
  40. {
  41. rt_uint8_t bp;
  42. rt_uint32_t index;
  43. bp = ioman->ring_fifo[ioman->numbuf - 1];
  44. for (index = ioman->numbuf - 1; index > 0; index --)
  45. {
  46. ioman->ring_fifo[index] = ioman->ring_fifo[index - 1];
  47. }
  48. ioman->ring_fifo[0] = bp;
  49. return bp;
  50. }
  51. /*
  52. * get the index of bp in fifo ring and then put it to the top of
  53. * fifo ring
  54. */
  55. static void ioman_ring_fifo_relocate(IOManager *ioman, rt_uint32_t bp)
  56. {
  57. rt_uint32_t bp_index = 0;
  58. register rt_uint32_t index;
  59. /* find bp in fifo ring */
  60. for (index = 0; index < ioman->numbuf; index ++)
  61. {
  62. if (ioman->ring_fifo[index] == bp)
  63. {
  64. bp_index = index;
  65. break;
  66. }
  67. }
  68. /* not found bp in fifo ring */
  69. if (index == ioman->numbuf) return;
  70. /* move the bp to the top of fifo ring */
  71. for (index = bp_index; index > 0; index --)
  72. {
  73. ioman->ring_fifo[index] = ioman->ring_fifo[index - 1];
  74. }
  75. ioman->ring_fifo[0] = bp;
  76. }
  77. /*
  78. * get last bp in fifo ring
  79. */
  80. rt_inline rt_uint8_t ioman_ring_fifo_last(IOManager *ioman)
  81. {
  82. RT_ASSERT(ioman != RT_NULL);
  83. return ioman->ring_fifo[ioman->numbuf - 1];
  84. }
  85. static rt_err_t ioman_readSector(IOManager *ioman, rt_uint32_t address, rt_uint8_t* buf)
  86. {
  87. rt_err_t result;
  88. RT_ASSERT(buf != RT_NULL);
  89. result = rt_device_read(ioman->device, address * DFS_SECTOR_SIZE, buf, DFS_SECTOR_SIZE);
  90. if (result == DFS_SECTOR_SIZE) return RT_EOK;
  91. return -RT_ERROR;
  92. }
  93. static rt_err_t ioman_writeSector(IOManager *ioman, rt_uint32_t address, rt_uint8_t* buf)
  94. {
  95. rt_err_t result;
  96. RT_ASSERT(buf != RT_NULL);
  97. result = rt_device_write(ioman->device, address * DFS_SECTOR_SIZE, buf, DFS_SECTOR_SIZE);
  98. if (result == DFS_SECTOR_SIZE) return RT_EOK;
  99. return -RT_ERROR;
  100. }
  101. static rt_int32_t ioman_findSectorInCache(IOManager *ioman, rt_uint32_t address)
  102. {
  103. rt_uint32_t c;
  104. for(c=0;c<ioman->numbuf;c++)
  105. {
  106. if((ioman->status[c] & (1 << IOMAN_STATUS_ATTR_VALIDDATA)) &&
  107. ioman->sector[c] == address)
  108. return (c);
  109. }
  110. return -RT_ERROR;
  111. }
  112. static rt_err_t ioman_flushSector(IOManager *ioman, rt_uint32_t bp)
  113. {
  114. rt_err_t result;
  115. RT_ASSERT(ioman != RT_NULL);
  116. result = ioman_writeSector(ioman, ioman->sector[bp], &ioman->cache[bp][0]);
  117. if (result == RT_EOK)
  118. {
  119. /* set status */
  120. ioman->status[bp] &= ~(1 << IOMAN_STATUS_ATTR_WRITE);
  121. }
  122. return result;
  123. }
  124. rt_uint8_t* ioman_getSector(IOManager *ioman, rt_uint32_t address, rt_uint8_t mode)
  125. {
  126. rt_int32_t bp;
  127. RT_ASSERT(ioman != RT_NULL);
  128. bp = ioman_findSectorInCache(ioman, address);
  129. if (bp != -RT_ERROR)
  130. {
  131. /* incress cache usage */
  132. ioman->usage[bp] ++;
  133. /* relocate bp in fifo ring */
  134. ioman_ring_fifo_relocate(ioman, bp);
  135. if (ioman_isReqRw(mode))
  136. ioman->status[bp] |= (1 << IOMAN_STATUS_ATTR_WRITE);
  137. return &(ioman->cache[bp][0]);
  138. }
  139. /* not find in cache, get the last bp in fifo ring */
  140. bp = ioman_ring_fifo_last(ioman);
  141. if ((ioman->status[bp] & (1 << IOMAN_STATUS_ATTR_WRITE)) &&
  142. ioman->usage[bp] == 0)
  143. {
  144. /* it's a writable buffer, flush it */
  145. ioman_flushSector(ioman, bp);
  146. }
  147. /* strip last bp in fifo ring */
  148. bp = ioman_ring_fifo_swap(ioman);
  149. /* read sector */
  150. ioman_readSector(ioman, address, &ioman->cache[bp][0]);
  151. ioman->sector[bp] = address;
  152. ioman->usage [bp] = 1;
  153. ioman->status[bp] = (1 << IOMAN_STATUS_ATTR_VALIDDATA);
  154. if (ioman_isReqRw(mode))
  155. ioman->status[bp] |= (1 << IOMAN_STATUS_ATTR_WRITE);
  156. return &ioman->cache[bp][0];
  157. }
  158. rt_err_t ioman_releaseSector(IOManager *ioman, rt_uint8_t* buf)
  159. {
  160. rt_uint32_t bp;
  161. /* get buffer place */
  162. bp = ((rt_uint32_t)buf - (rt_uint32_t)&ioman->cache[0]) / DFS_SECTOR_SIZE;
  163. /* decrease usage */
  164. if (ioman->usage[bp] > 0) ioman->usage[bp] --;
  165. if (ioman->usage[bp] == 0)
  166. {
  167. if(ioman->status[bp] & (1 << IOMAN_STATUS_ATTR_WRITE))
  168. {
  169. ioman_flushSector(ioman,bp);
  170. }
  171. }
  172. return RT_EOK;
  173. }
  174. rt_err_t ioman_flushRange(IOManager *ioman, rt_uint32_t address_low, rt_uint32_t address_high)
  175. {
  176. rt_uint32_t c;
  177. if(address_low > address_high)
  178. {
  179. c = address_low; address_low = address_high; address_high = c;
  180. }
  181. for(c = 0; c < ioman->numbuf; c++)
  182. {
  183. if((ioman->sector[c]>=address_low)
  184. && (ioman->sector[c]<=address_high)
  185. && (ioman->status[c] & (1 << IOMAN_STATUS_ATTR_WRITE)))
  186. {
  187. if(ioman_flushSector(ioman,c) != RT_EOK)
  188. return -RT_ERROR;
  189. /* remove writable status */
  190. if(ioman->usage[c]==0) ioman->status[c] &= ~IOMAN_STATUS_ATTR_WRITE;
  191. }
  192. }
  193. return RT_EOK;
  194. }
  195. /*
  196. * read multi-sectors directly (none-cachable)
  197. */
  198. rt_err_t ioman_directSectorRead(IOManager *ioman, rt_uint32_t address, rt_uint8_t* buf, rt_uint32_t numsector)
  199. {
  200. rt_err_t result;
  201. RT_ASSERT(buf != RT_NULL);
  202. result = rt_device_read(ioman->device, address * DFS_SECTOR_SIZE, buf, DFS_SECTOR_SIZE * numsector);
  203. if (result == DFS_SECTOR_SIZE * numsector) return RT_EOK;
  204. return -RT_ERROR;
  205. }
  206. /*
  207. * write multi-sectors directly (none-cachable)
  208. */
  209. rt_err_t ioman_directSectorWrite(IOManager *ioman, rt_uint32_t address, rt_uint8_t* buf, rt_uint32_t numsector)
  210. {
  211. rt_err_t result;
  212. RT_ASSERT(buf != RT_NULL);
  213. result = rt_device_write(ioman->device, address * DFS_SECTOR_SIZE, buf, DFS_SECTOR_SIZE * numsector);
  214. if (result == DFS_SECTOR_SIZE * numsector) return RT_EOK;
  215. return -RT_ERROR;
  216. }