/* dfs_pcache.c */
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-05-05     RTT          Implement mnt in dfs v2.0
 * 2023-10-23     Shell        fix synchronization of data to icache
 */
  11. #define DBG_TAG "dfs.pcache"
  12. #define DBG_LVL DBG_WARNING
  13. #include <rtdbg.h>
  14. #include <dfs_pcache.h>
  15. #include <dfs_dentry.h>
  16. #include <dfs_mnt.h>
  17. #include <rthw.h>
  18. #ifdef RT_USING_PAGECACHE
  19. #include <mm_page.h>
  20. #include <mm_private.h>
  21. #include <mmu.h>
  22. #include <tlb.h>
  23. #ifndef RT_PAGECACHE_COUNT
  24. #define RT_PAGECACHE_COUNT 4096
  25. #endif
  26. #ifndef RT_PAGECACHE_ASPACE_COUNT
  27. #define RT_PAGECACHE_ASPACE_COUNT 1024
  28. #endif
  29. #ifndef RT_PAGECACHE_PRELOAD
  30. #define RT_PAGECACHE_PRELOAD 4
  31. #endif
  32. #ifndef RT_PAGECACHE_GC_WORK_LEVEL
  33. #define RT_PAGECACHE_GC_WORK_LEVEL 90
  34. #endif
  35. #ifndef RT_PAGECACHE_GC_STOP_LEVEL
  36. #define RT_PAGECACHE_GC_STOP_LEVEL 70
  37. #endif
  38. #define PCACHE_MQ_GC 1
  39. #define PCACHE_MQ_WB 2
/* Message cookie used by mmap paths to talk to the pcache worker. */
struct dfs_aspace_mmap_obj
{
    rt_uint32_t cmd;        /* PCACHE_MQ_* command code */
    struct rt_mailbox *ack; /* optional mailbox for a completion ack */
    struct dfs_file *file;
    struct rt_varea *varea;
    void *data;
};

/* Work item posted to the pcache thread via __pcache.mqueue. */
struct dfs_pcache_mq_obj
{
    struct rt_mailbox *ack; /* optional mailbox for a completion ack */
    rt_uint32_t cmd;        /* PCACHE_MQ_GC or PCACHE_MQ_WB */
};

/* forward declarations (page/aspace helpers defined below) */
static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos);
static void dfs_page_ref(struct dfs_page *page);
static int dfs_page_inactive(struct dfs_page *page);
static int dfs_page_remove(struct dfs_page *page);
static void dfs_page_release(struct dfs_page *page);
static int dfs_page_dirty(struct dfs_page *page);
static int dfs_aspace_release(struct dfs_aspace *aspace);
static int dfs_aspace_lock(struct dfs_aspace *aspace);
static int dfs_aspace_unlock(struct dfs_aspace *aspace);
static int dfs_pcache_lock(void);
static int dfs_pcache_unlock(void);

/* Global page-cache state: hash buckets, aspace LRU ring, counters, worker. */
static struct dfs_pcache __pcache;
/**
 * Try to evict up to @count pages from @aspace.
 * The inactive segment of the page ring is scanned first, then the active
 * segment; a page is only evicted when dfs_page_remove() succeeds (i.e. its
 * ref_count is 1).
 *
 * Returns the number of pages actually evicted.
 */
static int dfs_aspace_gc(struct dfs_aspace *aspace, int count)
{
    int cnt = count;

    if (aspace)
    {
        dfs_aspace_lock(aspace);
        if (aspace->pages_count > 0)
        {
            struct dfs_page *page = RT_NULL;
            /* list_inactive/list_active are two heads on one ring: nodes
             * between list_inactive and list_active form the inactive set */
            rt_list_t *node = aspace->list_inactive.next;
            while (cnt && node != &aspace->list_active)
            {
                page = rt_list_entry(node, struct dfs_page, space_node);
                node = node->next; /* advance before the node may be removed */
                if (dfs_page_remove(page) == 0)
                {
                    cnt --;
                }
            }
            /* then walk the active set */
            node = aspace->list_active.next;
            while (cnt && node != &aspace->list_inactive)
            {
                page = rt_list_entry(node, struct dfs_page, space_node);
                node = node->next;
                if (dfs_page_remove(page) == 0)
                {
                    cnt --;
                }
            }
        }
        dfs_aspace_unlock(aspace);
    }

    return count - cnt;
}
/**
 * Evict cached pages globally.
 * @count: number of pages to free; 0 means "free down to the GC stop level".
 * Inactive aspaces are scanned first (and released once fully drained),
 * then active ones.
 */
void dfs_pcache_release(size_t count)
{
    rt_list_t *node = RT_NULL;
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();

    if (count == 0)
    {
        /* NOTE(review): if pages_count is already below the stop level this
         * size_t subtraction wraps to a huge target, i.e. "evict everything
         * possible" — confirm that is the intended behavior. */
        count = rt_atomic_load(&(__pcache.pages_count)) - RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_STOP_LEVEL / 100;
    }

    /* inactive aspace segment first */
    node = __pcache.list_inactive.next;
    while (count && node != &__pcache.list_active)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next; /* advance before possible release */
        if (aspace)
        {
            count -= dfs_aspace_gc(aspace, count);
            dfs_aspace_release(aspace); /* frees the aspace if fully drained */
        }
    }

    /* then the active segment */
    node = __pcache.list_active.next;
    while (count && node != &__pcache.list_inactive)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace)
        {
            count -= dfs_aspace_gc(aspace, count);
        }
    }

    dfs_pcache_unlock();
}
/**
 * For every cached aspace belonging to @mnt: write back / drop its pages
 * (dfs_aspace_clean) and then invoke @cb on it (e.g. dfs_aspace_release
 * on unmount, or a no-op for a plain cache flush).
 */
static void _pcache_clean(struct dfs_mnt *mnt, int (*cb)(struct dfs_aspace *aspace))
{
    rt_list_t *node = RT_NULL;
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();

    /* inactive segment of the aspace ring */
    node = __pcache.list_inactive.next;
    while (node != &__pcache.list_active)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next; /* advance before cb() may unlink the node */
        if (aspace && aspace->mnt == mnt)
        {
            dfs_aspace_clean(aspace);
            cb(aspace);
        }
    }

    /* active segment */
    node = __pcache.list_active.next;
    while (node != &__pcache.list_inactive)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace && aspace->mnt == mnt)
        {
            dfs_aspace_clean(aspace);
            cb(aspace);
        }
    }

    dfs_pcache_unlock();
}
  160. void dfs_pcache_unmount(struct dfs_mnt *mnt)
  161. {
  162. _pcache_clean(mnt, dfs_aspace_release);
  163. }
  164. static int _dummy_cb(struct dfs_aspace *mnt)
  165. {
  166. return 0;
  167. }
  168. void dfs_pcache_clean(struct dfs_mnt *mnt)
  169. {
  170. _pcache_clean(mnt, _dummy_cb);
  171. }
  172. static int dfs_pcache_limit_check(void)
  173. {
  174. int index = 4;
  175. while (index && rt_atomic_load(&(__pcache.pages_count)) > RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
  176. {
  177. dfs_pcache_release(0);
  178. index --;
  179. }
  180. return 0;
  181. }
/**
 * Background worker for the page cache.
 * Blocks on __pcache.mqueue and services two commands:
 *   - PCACHE_MQ_GC: shrink the cache back under the GC work level;
 *   - PCACHE_MQ_WB: write back aged dirty pages, up to 4 pages per command.
 */
static void dfs_pcache_thread(void *parameter)
{
    struct dfs_pcache_mq_obj work;

    while (1)
    {
        if (rt_mq_recv(__pcache.mqueue, &work, sizeof(work), RT_WAITING_FOREVER) == sizeof(work))
        {
            if (work.cmd == PCACHE_MQ_GC)
            {
                dfs_pcache_limit_check();
            }
            else if (work.cmd == PCACHE_MQ_WB)
            {
                int count = 0;
                rt_list_t *node;
                struct dfs_page *page = 0;

                while (1)
                {
                    /* try to get dirty page */
                    dfs_pcache_lock();
                    page = 0;
                    rt_list_for_each(node, &__pcache.list_active)
                    {
                        if (node != &__pcache.list_inactive) /* skip the second ring head */
                        {
                            struct dfs_aspace *aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
                            dfs_aspace_lock(aspace);
                            if (aspace->list_dirty.next != &aspace->list_dirty)
                            {
                                page = rt_list_entry(aspace->list_dirty.next, struct dfs_page, dirty_node);
                                dfs_page_ref(page); /* keep it alive after dropping the locks */
                                dfs_aspace_unlock(aspace);
                                break;
                            }
                            else
                            {
                                page = RT_NULL;
                            }
                            dfs_aspace_unlock(aspace);
                        }
                    }
                    dfs_pcache_unlock();

                    if (page)
                    {
                        struct dfs_aspace *aspace = page->aspace;

                        dfs_aspace_lock(aspace);
                        if (page->is_dirty == 1 && aspace->vnode)
                        {
                            /* only write back pages that stayed dirty >= 500 ms */
                            if (rt_tick_get_millisecond() - page->tick_ms >= 500)
                            {
                                /* clamp the write length to the current file size */
                                if (aspace->vnode->size < page->fpos + page->size)
                                {
                                    page->len = aspace->vnode->size - page->fpos;
                                }
                                else
                                {
                                    page->len = page->size;
                                }

                                if (aspace->ops->write)
                                {
                                    aspace->ops->write(page);
                                }

                                page->is_dirty = 0;
                                if (page->dirty_node.next != RT_NULL)
                                {
                                    rt_list_remove(&page->dirty_node);
                                    page->dirty_node.next = RT_NULL;
                                }
                            }
                        }
                        dfs_page_release(page); /* drop the reference taken above */
                        dfs_aspace_unlock(aspace);
                    }
                    else
                    {
                        break; /* no dirty pages left */
                    }

                    rt_thread_mdelay(5); /* pace the writeback */
                    count ++;
                    if (count >= 4)
                    {
                        break; /* batch limit per WB command */
                    }
                }
            }
        }
    }
}
/**
 * One-shot initialization of the global page cache (runs via
 * INIT_PREV_EXPORT): hash buckets, the combined active/inactive aspace
 * ring, the cache lock, the worker message queue and the "pcache" thread.
 */
static int dfs_pcache_init(void)
{
    rt_thread_t tid;

    for (int i = 0; i < RT_PAGECACHE_HASH_NR; i++)
    {
        rt_list_init(&__pcache.head[i]);
    }

    rt_list_init(&__pcache.list_active);
    rt_list_init(&__pcache.list_inactive);
    /* one ring, two heads: active segment first, inactive right after it */
    rt_list_insert_after(&__pcache.list_active, &__pcache.list_inactive);

    rt_atomic_store(&(__pcache.pages_count), 0);

    rt_mutex_init(&__pcache.lock, "pcache", RT_IPC_FLAG_PRIO);

    /* NOTE(review): rt_mq_create's result is not checked; a NULL mqueue
     * would fault later in dfs_pcache_thread / dfs_pcache_mq_work. */
    __pcache.mqueue = rt_mq_create("pcache", sizeof(struct dfs_pcache_mq_obj), 1024, RT_IPC_FLAG_FIFO);

    tid = rt_thread_create("pcache", dfs_pcache_thread, 0, 8192, 25, 5);
    if (tid)
    {
        rt_thread_startup(tid);
    }

    __pcache.last_time_wb = rt_tick_get_millisecond();

    return 0;
}
INIT_PREV_EXPORT(dfs_pcache_init);
  292. static rt_ubase_t dfs_pcache_mq_work(rt_uint32_t cmd)
  293. {
  294. rt_err_t err;
  295. struct dfs_pcache_mq_obj work = { 0 };
  296. work.cmd = cmd;
  297. err = rt_mq_send_wait(__pcache.mqueue, (const void *)&work, sizeof(struct dfs_pcache_mq_obj), 0);
  298. return err;
  299. }
/* Take the global cache mutex guarding __pcache lists and hash buckets. */
static int dfs_pcache_lock(void)
{
    rt_mutex_take(&__pcache.lock, RT_WAITING_FOREVER);
    return 0;
}

/* Release the global cache mutex. */
static int dfs_pcache_unlock(void)
{
    rt_mutex_release(&__pcache.lock);
    return 0;
}
  310. static uint32_t dfs_aspace_hash(struct dfs_mnt *mnt, const char *path)
  311. {
  312. uint32_t val = 0;
  313. if (path)
  314. {
  315. while (*path)
  316. {
  317. val = ((val << 5) + val) + *path++;
  318. }
  319. }
  320. return (val ^ (unsigned long)mnt) & (RT_PAGECACHE_HASH_NR - 1);
  321. }
/**
 * Look up a cached aspace matching @dentry: same mount, same ops and same
 * path.  On a hit the aspace's ref_count is bumped before it is returned;
 * the caller owns that reference.  Returns RT_NULL on a miss.
 */
static struct dfs_aspace *dfs_aspace_hash_lookup(struct dfs_dentry *dentry, const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();
    rt_list_for_each_entry(aspace, &__pcache.head[dfs_aspace_hash(dentry->mnt, dentry->pathname)], hash_node)
    {
        if (aspace->mnt == dentry->mnt
            && aspace->ops == ops
            && !strcmp(aspace->pathname, dentry->pathname))
        {
            rt_atomic_add(&aspace->ref_count, 1); /* reference for the caller */
            dfs_pcache_unlock();
            return aspace;
        }
    }
    dfs_pcache_unlock();

    return RT_NULL;
}
/* Publish a new aspace: link it into its hash bucket and the tail of the
 * active segment of the global ring, taking the cache's own reference. */
static void dfs_aspace_insert(struct dfs_aspace *aspace)
{
    uint32_t val = 0;

    val = dfs_aspace_hash(aspace->mnt, aspace->pathname);

    dfs_pcache_lock();
    rt_atomic_add(&aspace->ref_count, 1); /* reference held by the cache lists */
    rt_list_insert_after(&__pcache.head[val], &aspace->hash_node);
    /* before list_inactive == tail of the active segment */
    rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);
    dfs_pcache_unlock();
}
/* Unlink @aspace from the hash bucket and the active/inactive ring.
 * The .next != RT_NULL guards make removal idempotent. */
static void dfs_aspace_remove(struct dfs_aspace *aspace)
{
    dfs_pcache_lock();
    if (aspace->hash_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->hash_node);
    }
    if (aspace->cache_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->cache_node);
    }
    dfs_pcache_unlock();
}
  363. static void dfs_aspace_active(struct dfs_aspace *aspace)
  364. {
  365. dfs_pcache_lock();
  366. if (aspace->cache_node.next != RT_NULL)
  367. {
  368. rt_list_remove(&aspace->cache_node);
  369. rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);
  370. }
  371. dfs_pcache_unlock();
  372. }
  373. static void dfs_aspace_inactive(struct dfs_aspace *aspace)
  374. {
  375. dfs_pcache_lock();
  376. if (aspace->cache_node.next != RT_NULL)
  377. {
  378. rt_list_remove(&aspace->cache_node);
  379. rt_list_insert_before(&__pcache.list_active, &aspace->cache_node);
  380. }
  381. dfs_pcache_unlock();
  382. }
/**
 * Allocate and initialize a new aspace bound to @vnode/@ops, copy the
 * mount/path identity from @dentry (when present) and publish it in the
 * cache.  Returns RT_NULL when the allocation fails.
 */
static struct dfs_aspace *_dfs_aspace_create(struct dfs_dentry *dentry,
                                             struct dfs_vnode *vnode,
                                             const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace;

    aspace = rt_calloc(1, sizeof(struct dfs_aspace));
    if (aspace)
    {
        rt_list_init(&aspace->list_active);
        rt_list_init(&aspace->list_inactive);
        rt_list_init(&aspace->list_dirty);
        /* one page ring with two heads: active segment, then inactive */
        rt_list_insert_after(&aspace->list_active, &aspace->list_inactive);

        aspace->avl_root.root_node = 0;
        aspace->avl_page = 0; /* last-hit cache for dfs_page_search() */

        rt_mutex_init(&aspace->lock, rt_thread_self()->parent.name, RT_IPC_FLAG_PRIO);
        rt_atomic_store(&aspace->ref_count, 1); /* creator's reference */

        aspace->pages_count = 0;
        aspace->vnode = vnode;
        aspace->ops = ops;

        if (dentry && dentry->mnt)
        {
            aspace->mnt = dentry->mnt;
            /* NOTE(review): rt_strdup results are not checked; on OOM these
             * stay NULL and later strcmp/rt_kprintf users would fault. */
            aspace->fullpath = rt_strdup(dentry->mnt->fullpath);
            aspace->pathname = rt_strdup(dentry->pathname);
        }

        dfs_aspace_insert(aspace);
    }

    return aspace;
}
/**
 * Get-or-create the aspace for @dentry/@vnode.
 * A cached aspace (same mount/ops/path) is reactivated and rebound to
 * @vnode; otherwise a fresh one is created.  The returned aspace carries
 * a reference owned by the caller.
 */
struct dfs_aspace *dfs_aspace_create(struct dfs_dentry *dentry,
                                     struct dfs_vnode *vnode,
                                     const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace = RT_NULL;

    RT_ASSERT(vnode && ops);
    dfs_pcache_lock();
    if (dentry)
    {
        aspace = dfs_aspace_hash_lookup(dentry, ops); /* takes a reference on hit */
    }

    if (!aspace)
    {
        aspace = _dfs_aspace_create(dentry, vnode, ops);
    }
    else
    {
        /* reuse the cached aspace for the (possibly new) vnode */
        aspace->vnode = vnode;
        dfs_aspace_active(aspace);
    }
    dfs_pcache_unlock();

    return aspace;
}
  435. int dfs_aspace_destroy(struct dfs_aspace *aspace)
  436. {
  437. int ret = -EINVAL;
  438. if (aspace)
  439. {
  440. dfs_pcache_lock();
  441. dfs_aspace_lock(aspace);
  442. rt_atomic_sub(&aspace->ref_count, 1);
  443. RT_ASSERT(rt_atomic_load(&aspace->ref_count) > 0);
  444. dfs_aspace_inactive(aspace);
  445. aspace->vnode = RT_NULL;
  446. if (dfs_aspace_release(aspace) != 0)
  447. {
  448. dfs_aspace_unlock(aspace);
  449. }
  450. dfs_pcache_unlock();
  451. }
  452. return ret;
  453. }
/**
 * Free @aspace if it is down to the cache's own last reference and holds
 * no pages.  Returns 0 when destroyed, -1 otherwise.
 *
 * On destruction the aspace mutex is detached (not released) because the
 * object is gone; on failure the mutex is unlocked normally.  Callers must
 * hold the aspace lock and must not touch @aspace after a 0 return.
 */
static int dfs_aspace_release(struct dfs_aspace *aspace)
{
    int ret = -1;

    if (aspace)
    {
        dfs_pcache_lock();
        dfs_aspace_lock(aspace);

        if (rt_atomic_load(&aspace->ref_count) == 1 && aspace->pages_count == 0)
        {
            dfs_aspace_remove(aspace); /* unlink from hash + ring */

            if (aspace->fullpath)
            {
                rt_free(aspace->fullpath);
            }
            if (aspace->pathname)
            {
                rt_free(aspace->pathname);
            }
            rt_mutex_detach(&aspace->lock);
            rt_free(aspace);
            ret = 0;
        }
        else
        {
            dfs_aspace_unlock(aspace);
        }
        dfs_pcache_unlock();
    }

    return ret;
}
/**
 * Print the pages of @aspace to the console (shell helper).
 * @is_dirty != 0 restricts the listing to dirty pages; 0 lists all pages.
 */
static int _dfs_aspace_dump(struct dfs_aspace *aspace, int is_dirty)
{
    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);
        if (aspace->pages_count > 0)
        {
            rt_list_for_each(next, &aspace->list_inactive)
            {
                if (next != &aspace->list_active) /* skip the second ring head */
                {
                    page = rt_list_entry(next, struct dfs_page, space_node);
                    if (is_dirty && page->is_dirty)
                    {
                        rt_kprintf("    pages >> fpos: %d index :%d is_dirty: %d\n", page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
                    }
                    else if (is_dirty == 0)
                    {
                        rt_kprintf("    pages >> fpos: %d index :%d is_dirty: %d\n", page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
                    }
                }
            }
        }
        else
        {
            rt_kprintf("    pages >> empty\n");
        }
        dfs_aspace_unlock(aspace);
    }
    return 0;
}
/**
 * Shell command "dfs_cache": show per-aspace page counts, optionally with
 * a per-page listing (--dump) or only dirty pages (--dirty).
 */
static int dfs_pcache_dump(int argc, char **argv)
{
    int dump = 0; /* 0: summary, 1: all pages, 2: dirty pages only */
    rt_list_t *node;
    struct dfs_aspace *aspace;

    if (argc == 2)
    {
        if (strcmp(argv[1], "--dump") == 0)
        {
            dump = 1;
        }
        else if (strcmp(argv[1], "--dirty") == 0)
        {
            dump = 2;
        }
        else
        {
            rt_kprintf("dfs page cache dump\n");
            rt_kprintf("usage: dfs_cache\n");
            rt_kprintf("       dfs_cache --dump\n");
            rt_kprintf("       dfs_cache --dirty\n");
            return 0;
        }
    }

    dfs_pcache_lock();

    rt_kprintf("total pages count: %d / %d\n", rt_atomic_load(&(__pcache.pages_count)), RT_PAGECACHE_COUNT);

    /* walk the whole ring (both segments), skipping the inactive head */
    rt_list_for_each(node, &__pcache.list_active)
    {
        if (node != &__pcache.list_inactive)
        {
            aspace = rt_list_entry(node, struct dfs_aspace, cache_node);

            if (aspace->mnt)
            {
                rt_kprintf("file: %s%s pages: %d\n", aspace->fullpath, aspace->pathname, aspace->pages_count);
            }
            else
            {
                rt_kprintf("unknown type, pages: %d\n", aspace->pages_count);
            }

            if (dump > 0)
            {
                _dfs_aspace_dump(aspace, dump == 2 ? 1 : 0);
            }
        }
    }

    dfs_pcache_unlock();

    return 0;
}
MSH_CMD_EXPORT_ALIAS(dfs_pcache_dump, dfs_cache, dump dfs page cache);
/**
 * Tear down every user-space mapping of @page.
 * If the page is mapped and still covers a valid file range it is marked
 * dirty first, since user code may have written through the mappings.
 */
static int dfs_page_unmap(struct dfs_page *page)
{
    rt_list_t *next;
    struct dfs_mmap *map;

    next = page->mmap_head.next;

    if (next != &page->mmap_head && page->fpos < page->aspace->vnode->size)
    {
        dfs_page_dirty(page);
    }

    while (next != &page->mmap_head)
    {
        map = rt_list_entry(next, struct dfs_mmap, mmap_node);
        next = next->next; /* advance before freeing the map entry */

        if (map)
        {
            rt_varea_t varea;
            void *vaddr;

            varea = rt_aspace_query(map->aspace, map->vaddr);
            RT_ASSERT(varea);
            vaddr = dfs_aspace_vaddr(varea, page->fpos);

            rt_varea_unmap_page(varea, vaddr);

            rt_free(map);
        }
    }

    rt_list_init(&page->mmap_head);

    return 0;
}
/**
 * Allocate a page descriptor plus one backing physical page, picked from
 * the affinity bucket matching @pos.  ref_count starts at 1 (the caller's
 * reference).  Returns RT_NULL on failure.
 */
static struct dfs_page *dfs_page_create(off_t pos)
{
    struct dfs_page *page = RT_NULL;
    int affid = RT_PAGE_PICK_AFFID(pos);

    page = rt_calloc(1, sizeof(struct dfs_page));
    if (page)
    {
        page->page = rt_pages_alloc_tagged(0, affid, PAGE_ANY_AVAILABLE);
        if (page->page)
        {
            //memset(page->page, 0x00, ARCH_PAGE_SIZE);
            rt_list_init(&page->mmap_head);
            rt_atomic_store(&(page->ref_count), 1);
        }
        else
        {
            LOG_E("page alloc failed!\n");
            rt_free(page);
            page = RT_NULL;
        }
    }

    return page;
}
/* Take an extra reference on @page; pairs with dfs_page_release(). */
static void dfs_page_ref(struct dfs_page *page)
{
    rt_atomic_add(&(page->ref_count), 1);
}
/**
 * Drop one reference on @page.  When the last reference goes away the
 * page is unmapped from user space, written back if still dirty (length
 * clamped to the file size), and both the backing page and the descriptor
 * are freed.
 */
static void dfs_page_release(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    rt_atomic_sub(&(page->ref_count), 1);

    if (rt_atomic_load(&(page->ref_count)) == 0)
    {
        dfs_page_unmap(page);

        if (page->is_dirty == 1 && aspace->vnode)
        {
            /* final writeback, clamped to the current file size */
            if (aspace->vnode->size < page->fpos + page->size)
            {
                page->len = aspace->vnode->size - page->fpos;
            }
            else
            {
                page->len = page->size;
            }
            if (aspace->ops->write)
            {
                aspace->ops->write(page);
            }
            page->is_dirty = 0;
        }
        RT_ASSERT(page->is_dirty == 0);

        rt_pages_free(page->page, 0);
        page->page = RT_NULL;
        rt_free(page);
    }

    dfs_aspace_unlock(aspace);
}
  651. static int dfs_page_compare(off_t fpos, off_t value)
  652. {
  653. return fpos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE - value;
  654. }
/**
 * Insert @page into the aspace's AVL index keyed by page->fpos.
 * Returns -1 if a page with the same fpos already exists, 0 on success.
 * Also caches the page in aspace->avl_page for fast repeated lookups.
 */
static int _dfs_page_insert(struct dfs_aspace *aspace, struct dfs_page *page)
{
    struct dfs_page *tmp;
    struct util_avl_struct *current = NULL;
    struct util_avl_struct **next = &(aspace->avl_root.root_node);

    /* Figure out where to put new node */
    while (*next)
    {
        current = *next;
        tmp = rt_container_of(current, struct dfs_page, avl_node);

        if (page->fpos < tmp->fpos)
            next = &(current->avl_left);
        else if (page->fpos > tmp->fpos)
            next = &(current->avl_right);
        else
            return -1; /* duplicate fpos */
    }

    /* Add new node and rebalance tree. */
    util_avl_link(&page->avl_node, current, next);
    util_avl_rebalance(current, &aspace->avl_root);
    aspace->avl_page = page;

    return 0;
}

/* Remove @page from the AVL index, dropping the avl_page fast-path cache
 * first if it points at this page. */
static void _dfs_page_remove(struct dfs_aspace *aspace, struct dfs_page *page)
{
    if (aspace->avl_page && aspace->avl_page == page)
    {
        aspace->avl_page = 0;
    }

    util_avl_remove(&page->avl_node, &aspace->avl_root);
}
/* Take the per-aspace mutex guarding its page lists, AVL tree and counters. */
static int dfs_aspace_lock(struct dfs_aspace *aspace)
{
    rt_mutex_take(&aspace->lock, RT_WAITING_FOREVER);
    return 0;
}

/* Release the per-aspace mutex. */
static int dfs_aspace_unlock(struct dfs_aspace *aspace)
{
    rt_mutex_release(&aspace->lock);
    return 0;
}
/**
 * Add a freshly created @page to its aspace: tail of the active segment
 * plus the AVL index, bumping both per-aspace and global page counters.
 * When the aspace exceeds RT_PAGECACHE_ASPACE_COUNT its oldest active
 * page is demoted to the inactive segment (GC candidate).
 */
static int dfs_page_insert(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    /* before list_inactive == tail of the active segment */
    rt_list_insert_before(&aspace->list_inactive, &page->space_node);
    aspace->pages_count ++;

    if (_dfs_page_insert(aspace, page))
    {
        RT_ASSERT(0); /* duplicate fpos must not happen */
    }

    if (aspace->pages_count > RT_PAGECACHE_ASPACE_COUNT)
    {
        rt_list_t *next = aspace->list_active.next;
        if (next != &aspace->list_inactive)
        {
            struct dfs_page *tmp = rt_list_entry(next, struct dfs_page, space_node);
            dfs_page_inactive(tmp);
        }
    }

    rt_atomic_add(&(__pcache.pages_count), 1);

    dfs_aspace_unlock(aspace);

    return 0;
}
/**
 * Evict @page from its aspace if nobody else holds it (ref_count == 1):
 * unlink it from the space list, AVL tree and dirty list, drop the global
 * counter and release the last reference (which frees the page).
 * Returns 0 on eviction, -1 if the page is still in use.
 */
static int dfs_page_remove(struct dfs_page *page)
{
    int ret = -1;
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    if (rt_atomic_load(&(page->ref_count)) == 1)
    {
        if (page->space_node.next != RT_NULL)
        {
            rt_list_remove(&page->space_node);
            page->space_node.next = RT_NULL;
            aspace->pages_count--;
            _dfs_page_remove(aspace, page);
        }
        if (page->dirty_node.next != RT_NULL)
        {
            rt_list_remove(&page->dirty_node);
            page->dirty_node.next = RT_NULL;
        }
        rt_atomic_sub(&(__pcache.pages_count), 1);
        dfs_page_release(page); /* drops the final reference -> frees page */
        ret = 0;
    }

    dfs_aspace_unlock(aspace);

    return ret;
}
/* Move @page to the tail of the ACTIVE segment of its aspace's page ring
 * (just before the list_inactive head). */
static int dfs_page_active(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);
    if (page->space_node.next != RT_NULL)
    {
        rt_list_remove(&page->space_node);
        rt_list_insert_before(&aspace->list_inactive, &page->space_node);
    }
    dfs_aspace_unlock(aspace);

    return 0;
}

/* Move @page to the tail of the INACTIVE segment (just before the
 * list_active head), making it a preferred GC victim. */
static int dfs_page_inactive(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);
    if (page->space_node.next != RT_NULL)
    {
        rt_list_remove(&page->space_node);
        rt_list_insert_before(&aspace->list_active, &page->space_node);
    }
    dfs_aspace_unlock(aspace);

    return 0;
}
/**
 * Mark @page dirty: queue it on the aspace's dirty list (only if it is
 * still linked into the space list), timestamp it, and kick the writeback
 * worker at most once per second.
 */
static int dfs_page_dirty(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    if (page->dirty_node.next == RT_NULL && page->space_node.next != RT_NULL)
    {
        rt_list_insert_before(&aspace->list_dirty, &page->dirty_node);
    }

    page->is_dirty = 1;
    page->tick_ms = rt_tick_get_millisecond();

    /* rate-limit writeback kicks to one per second */
    if (rt_tick_get_millisecond() - __pcache.last_time_wb >= 1000)
    {
        dfs_pcache_mq_work(PCACHE_MQ_WB);
        __pcache.last_time_wb = rt_tick_get_millisecond();
    }

    dfs_aspace_unlock(aspace);

    return 0;
}
/**
 * Find the cached page covering @fpos in @aspace.
 * Checks the avl_page one-entry fast path first, then walks the AVL tree.
 * A hit is promoted to the active list and returned with an extra
 * reference for the caller; a miss returns RT_NULL.
 */
static struct dfs_page *dfs_page_search(struct dfs_aspace *aspace, off_t fpos)
{
    int cmp;
    struct dfs_page *page;
    struct util_avl_struct *avl_node;

    dfs_aspace_lock(aspace);

    /* last-hit fast path */
    if (aspace->avl_page && dfs_page_compare(fpos, aspace->avl_page->fpos) == 0)
    {
        page = aspace->avl_page;
        dfs_page_active(page);
        dfs_page_ref(page);
        dfs_aspace_unlock(aspace);
        return page;
    }

    avl_node = aspace->avl_root.root_node;
    while (avl_node)
    {
        page = rt_container_of(avl_node, struct dfs_page, avl_node);
        cmp = dfs_page_compare(fpos, page->fpos);

        if (cmp < 0)
        {
            avl_node = avl_node->avl_left;
        }
        else if (cmp > 0)
        {
            avl_node = avl_node->avl_right;
        }
        else
        {
            aspace->avl_page = page; /* refresh the fast-path cache */
            dfs_page_active(page);
            dfs_page_ref(page);
            dfs_aspace_unlock(aspace);
            return page;
        }
    }

    dfs_aspace_unlock(aspace);

    return RT_NULL;
}
  826. static struct dfs_page *dfs_aspace_load_page(struct dfs_file *file, off_t pos)
  827. {
  828. struct dfs_page *page = RT_NULL;
  829. if (file && file->vnode && file->vnode->aspace)
  830. {
  831. struct dfs_vnode *vnode = file->vnode;
  832. struct dfs_aspace *aspace = vnode->aspace;
  833. page = dfs_page_create(pos);
  834. if (page)
  835. {
  836. page->aspace = aspace;
  837. page->size = ARCH_PAGE_SIZE;
  838. page->fpos = RT_ALIGN_DOWN(pos, ARCH_PAGE_SIZE);
  839. aspace->ops->read(file, page);
  840. page->ref_count ++;
  841. dfs_page_insert(page);
  842. }
  843. }
  844. return page;
  845. }
/**
 * Return the (referenced) page covering @pos for @file.
 * On a miss, loads the target page and reads ahead up to
 * RT_PAGECACHE_PRELOAD consecutive pages, stopping early at an existing
 * page or a load failure.  May trigger synchronous or queued GC when the
 * global page budget is approached.  Returns RT_NULL if nothing could be
 * loaded.
 */
static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos)
{
    struct dfs_page *page = RT_NULL;
    struct dfs_aspace *aspace = file->vnode->aspace;

    dfs_aspace_lock(aspace);
    page = dfs_page_search(aspace, pos);
    if (!page)
    {
        int count = RT_PAGECACHE_PRELOAD;
        struct dfs_page *tmp = RT_NULL;
        off_t fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE; /* align down */

        do
        {
            page = dfs_aspace_load_page(file, fpos);
            if (page)
            {
                if (tmp == RT_NULL)
                {
                    tmp = page; /* keep the reference on the requested page */
                }
                else
                {
                    dfs_page_release(page); /* read-ahead page: drop our ref */
                }
            }
            else
            {
                break; /* load failed, stop the read-ahead */
            }

            fpos += ARCH_PAGE_SIZE;
            page = dfs_page_search(aspace, fpos);
            if (page)
            {
                dfs_page_release(page); /* next page already cached: stop */
            }
            count --;
        } while (count && page == RT_NULL);

        page = tmp;
        if (page)
        {
            dfs_aspace_unlock(aspace);

            /* keep the cache under its global budget */
            if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT)
            {
                dfs_pcache_limit_check();
            }
            else if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
            {
                dfs_pcache_mq_work(PCACHE_MQ_GC);
            }

            return page;
        }
    }
    dfs_aspace_unlock(aspace);

    return page;
}
/**
 * Read @count bytes at *@pos from @file through the page cache.
 * Pages are looked up (and loaded on miss) via dfs_page_lookup() and the
 * data is copied out page by page, clamped to the file size.
 * *@pos is advanced by the amount read.
 *
 * Returns the number of bytes read (0 at/after EOF), or -EINVAL when the
 * file has no aspace or the aspace has no read op.
 */
int dfs_aspace_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
{
    int ret = -EINVAL;

    if (file && file->vnode && file->vnode->aspace)
    {
        if (!(file->vnode->aspace->ops->read))
            return ret;

        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;

        struct dfs_page *page;
        char *ptr = (char *)buf;

        ret = 0;

        while (count)
        {
            page = dfs_page_lookup(file, *pos);
            if (page)
            {
                off_t len;

                dfs_aspace_lock(aspace);
                /* bytes available from *pos within this page, clamped to EOF */
                if (aspace->vnode->size < page->fpos + ARCH_PAGE_SIZE)
                {
                    len = aspace->vnode->size - *pos;
                }
                else
                {
                    len = page->fpos + ARCH_PAGE_SIZE - *pos;
                }
                len = count > len ? len : count;
                if (len > 0)
                {
                    rt_memcpy(ptr, page->page + *pos - page->fpos, len);
                    ptr += len;
                    *pos += len;
                    count -= len;
                    ret += len;
                }
                else
                {
                    /* at or past EOF: stop */
                    dfs_page_release(page);
                    dfs_aspace_unlock(aspace);
                    break;
                }
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                break; /* page could not be loaded */
            }
        }
    }

    return ret;
}
  954. int dfs_aspace_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
  955. {
  956. int ret = -EINVAL;
  957. if (file && file->vnode && file->vnode->aspace)
  958. {
  959. struct dfs_vnode *vnode = file->vnode;
  960. struct dfs_aspace *aspace = vnode->aspace;
  961. struct dfs_page *page;
  962. char *ptr = (char *)buf;
  963. if (!(aspace->ops->write))
  964. {
  965. return ret;
  966. }
  967. else if (aspace->mnt && (aspace->mnt->flags & MNT_RDONLY))
  968. {
  969. return -EROFS;
  970. }
  971. ret = 0;
  972. while (count)
  973. {
  974. page = dfs_page_lookup(file, *pos);
  975. if (page)
  976. {
  977. off_t len;
  978. dfs_aspace_lock(aspace);
  979. len = page->fpos + ARCH_PAGE_SIZE - *pos;
  980. len = count > len ? len : count;
  981. rt_memcpy(page->page + *pos - page->fpos, ptr, len);
  982. ptr += len;
  983. *pos += len;
  984. count -= len;
  985. ret += len;
  986. if (*pos > aspace->vnode->size)
  987. {
  988. aspace->vnode->size = *pos;
  989. }
  990. if (file->flags & O_SYNC)
  991. {
  992. if (aspace->vnode->size < page->fpos + page->size)
  993. {
  994. page->len = aspace->vnode->size - page->fpos;
  995. }
  996. else
  997. {
  998. page->len = page->size;
  999. }
  1000. aspace->ops->write(page);
  1001. page->is_dirty = 0;
  1002. }
  1003. else
  1004. {
  1005. dfs_page_dirty(page);
  1006. }
  1007. dfs_page_release(page);
  1008. dfs_aspace_unlock(aspace);
  1009. }
  1010. else
  1011. {
  1012. break;
  1013. }
  1014. }
  1015. }
  1016. return ret;
  1017. }
  1018. int dfs_aspace_flush(struct dfs_aspace *aspace)
  1019. {
  1020. if (aspace)
  1021. {
  1022. rt_list_t *next;
  1023. struct dfs_page *page;
  1024. dfs_aspace_lock(aspace);
  1025. if (aspace->pages_count > 0 && aspace->vnode)
  1026. {
  1027. rt_list_for_each(next, &aspace->list_dirty)
  1028. {
  1029. page = rt_list_entry(next, struct dfs_page, dirty_node);
  1030. if (page->is_dirty == 1 && aspace->vnode)
  1031. {
  1032. if (aspace->vnode->size < page->fpos + page->size)
  1033. {
  1034. page->len = aspace->vnode->size - page->fpos;
  1035. }
  1036. else
  1037. {
  1038. page->len = page->size;
  1039. }
  1040. if (aspace->ops->write)
  1041. {
  1042. aspace->ops->write(page);
  1043. }
  1044. page->is_dirty = 0;
  1045. }
  1046. RT_ASSERT(page->is_dirty == 0);
  1047. }
  1048. }
  1049. dfs_aspace_unlock(aspace);
  1050. }
  1051. return 0;
  1052. }
  1053. int dfs_aspace_clean(struct dfs_aspace *aspace)
  1054. {
  1055. if (aspace)
  1056. {
  1057. dfs_aspace_lock(aspace);
  1058. if (aspace->pages_count > 0)
  1059. {
  1060. rt_list_t *next = aspace->list_active.next;
  1061. struct dfs_page *page;
  1062. while (next && next != &aspace->list_active)
  1063. {
  1064. if (next == &aspace->list_inactive)
  1065. {
  1066. next = next->next;
  1067. continue;
  1068. }
  1069. page = rt_list_entry(next, struct dfs_page, space_node);
  1070. next = next->next;
  1071. dfs_page_remove(page);
  1072. }
  1073. }
  1074. dfs_aspace_unlock(aspace);
  1075. }
  1076. return 0;
  1077. }
/**
 * Map a cached file page into a user virtual address space.
 *
 * Looks up (loading on demand) the page covering the file position that backs
 * @vaddr, maps its physical frame into @varea, and records the mapping in the
 * page's mmap list so dfs_aspace_unmap()/dfs_aspace_page_unmap() can tear it
 * down later.
 *
 * @param file  file whose vnode owns the page cache
 * @param varea target virtual-memory area
 * @param vaddr user virtual address being faulted/mapped
 *
 * @return kernel virtual address of the page frame on success, RT_NULL on
 *         failure (no page, allocation failure, or map error).
 */
void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    void *ret = RT_NULL;
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;
    rt_aspace_t target_aspace = varea->aspace;

    /* dfs_page_lookup returns the page with a reference held */
    page = dfs_page_lookup(file, dfs_aspace_fpos(varea, vaddr));
    if (page)
    {
        struct dfs_mmap *map = (struct dfs_mmap *)rt_calloc(1, sizeof(struct dfs_mmap));
        if (map)
        {
            void *pg_vaddr = page->page;
            void *pg_paddr = rt_kmem_v2p(pg_vaddr);
            int err = rt_varea_map_range(varea, vaddr, pg_paddr, page->size);
            if (err == RT_EOK)
            {
                /**
                 * Note: While the page is mapped into user area, the data writing into the page
                 * is not guaranteed to be visible for machines with the *weak* memory model and
                 * those Harvard architecture (especially for those ARM64) cores for their
                 * out-of-order pipelines of data buffer. Besides if the instruction cache in the
                 * L1 memory system is a VIPT cache, there are chances to have the alias matching
                 * entry if we reuse the same page frame and map it into the same virtual address
                 * of the previous one.
                 *
                 * That's why we have to do synchronization and cleanup manually to ensure that
                 * fetching of the next instruction can see the coherent data with the data cache,
                 * TLB, MMU, main memory, and all the other observers in the computer system.
                 */
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, vaddr, ARCH_PAGE_SIZE);
                rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, vaddr, ARCH_PAGE_SIZE);
                ret = pg_vaddr;
                /* record the mapping so the unmap paths can find it */
                map->aspace = target_aspace;
                map->vaddr = vaddr;
                dfs_aspace_lock(aspace);
                rt_list_insert_after(&page->mmap_head, &map->mmap_node);
                /* the mmap record now pins the page; drop the lookup ref */
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                /* mapping failed: drop both the page reference and the record */
                dfs_page_release(page);
                rt_free(map);
            }
        }
        else
        {
            /* no memory for the mmap record: give the page back */
            dfs_page_release(page);
        }
    }
    return ret;
}
/**
 * Tear down every recorded user mapping of this file that falls inside the
 * address range of @varea.
 *
 * Walks all cached pages and, for each, scans the page's mmap-record list for
 * an entry belonging to @varea's aspace and address range; matching entries
 * are unmapped, the page is marked dirty for shared (non-private) mappings,
 * and the record is freed.
 *
 * @param file  file whose vnode owns the page cache
 * @param varea virtual-memory area being unmapped
 * @return always 0
 */
int dfs_aspace_unmap(struct dfs_file *file, struct rt_varea *varea)
{
    struct dfs_vnode *vnode = file->vnode;
    struct dfs_aspace *aspace = vnode->aspace;
    void *unmap_start = varea->start;
    void *unmap_end = (char *)unmap_start + varea->size;

    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);
        if (aspace->pages_count > 0)
        {
            rt_list_for_each(next, &aspace->list_active)
            {
                /* the inactive list head is threaded into the same chain */
                if (next != &aspace->list_inactive)
                {
                    page = rt_list_entry(next, struct dfs_page, space_node);
                    if (page)
                    {
                        rt_list_t *node, *tmp;
                        struct dfs_mmap *map;
                        /* cache the last queried varea to avoid repeated
                         * rt_aspace_query calls for neighbouring vaddrs */
                        rt_varea_t map_varea = RT_NULL;

                        node = page->mmap_head.next;
                        while (node != &page->mmap_head)
                        {
                            rt_aspace_t map_aspace;
                            map = rt_list_entry(node, struct dfs_mmap, mmap_node);
                            tmp = node;
                            node = node->next;

                            if (map && varea->aspace == map->aspace
                                && map->vaddr >= unmap_start && map->vaddr < unmap_end)
                            {
                                void *vaddr = map->vaddr;
                                map_aspace = map->aspace;

                                if (!map_varea || map_varea->aspace != map_aspace ||
                                    vaddr < map_varea->start ||
                                    vaddr >= map_varea->start + map_varea->size)
                                {
                                    /* lock the tree so we don't access uncompleted data */
                                    map_varea = rt_aspace_query(map_aspace, vaddr);
                                }

                                rt_varea_unmap_page(map_varea, vaddr);

                                /* shared mapping within the file: the user may
                                 * have written through it, so mark it dirty */
                                if (!rt_varea_is_private_locked(varea) &&
                                    page->fpos < page->aspace->vnode->size)
                                {
                                    dfs_page_dirty(page);
                                }
                                rt_list_remove(tmp);
                                rt_free(map);
                                /* NOTE(review): stops after the first match —
                                 * presumably at most one record per (varea,
                                 * page) pair exists; confirm against the mmap
                                 * path */
                                break;
                            }
                        }
                    }
                }
            }
        }
        dfs_aspace_unlock(aspace);
    }
    return 0;
}
  1192. int dfs_aspace_page_unmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
  1193. {
  1194. struct dfs_page *page;
  1195. struct dfs_aspace *aspace = file->vnode->aspace;
  1196. if (aspace)
  1197. {
  1198. dfs_aspace_lock(aspace);
  1199. page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
  1200. if (page)
  1201. {
  1202. rt_list_t *node, *tmp;
  1203. struct dfs_mmap *map;
  1204. rt_varea_unmap_page(varea, vaddr);
  1205. node = page->mmap_head.next;
  1206. while (node != &page->mmap_head)
  1207. {
  1208. map = rt_list_entry(node, struct dfs_mmap, mmap_node);
  1209. tmp = node;
  1210. node = node->next;
  1211. if (map && varea->aspace == map->aspace && vaddr == map->vaddr)
  1212. {
  1213. if (!rt_varea_is_private_locked(varea))
  1214. {
  1215. dfs_page_dirty(page);
  1216. }
  1217. rt_list_remove(tmp);
  1218. rt_free(map);
  1219. break;
  1220. }
  1221. }
  1222. dfs_page_release(page);
  1223. }
  1224. dfs_aspace_unlock(aspace);
  1225. }
  1226. return 0;
  1227. }
  1228. int dfs_aspace_page_dirty(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
  1229. {
  1230. struct dfs_page *page;
  1231. struct dfs_aspace *aspace = file->vnode->aspace;
  1232. if (aspace)
  1233. {
  1234. dfs_aspace_lock(aspace);
  1235. page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
  1236. if (page)
  1237. {
  1238. dfs_page_dirty(page);
  1239. dfs_page_release(page);
  1240. }
  1241. dfs_aspace_unlock(aspace);
  1242. }
  1243. return 0;
  1244. }
  1245. off_t dfs_aspace_fpos(struct rt_varea *varea, void *vaddr)
  1246. {
  1247. return (off_t)(intptr_t)vaddr - (off_t)(intptr_t)varea->start + varea->offset * ARCH_PAGE_SIZE;
  1248. }
  1249. void *dfs_aspace_vaddr(struct rt_varea *varea, off_t fpos)
  1250. {
  1251. return varea->start + fpos - varea->offset * ARCH_PAGE_SIZE;
  1252. }
  1253. int dfs_aspace_mmap_read(struct dfs_file *file, struct rt_varea *varea, void *data)
  1254. {
  1255. int ret = 0;
  1256. if (file && varea)
  1257. {
  1258. struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
  1259. if (msg)
  1260. {
  1261. off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
  1262. return dfs_aspace_read(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
  1263. }
  1264. }
  1265. return ret;
  1266. }
  1267. int dfs_aspace_mmap_write(struct dfs_file *file, struct rt_varea *varea, void *data)
  1268. {
  1269. int ret = 0;
  1270. if (file && varea)
  1271. {
  1272. struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
  1273. if (msg)
  1274. {
  1275. off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
  1276. return dfs_aspace_write(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
  1277. }
  1278. }
  1279. return ret;
  1280. }
  1281. #endif