cache.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843
  1. /*
  2. * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version 2
  7. * of the License, or (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  16. */
  17. /************************************************************************/
  18. /* */
  19. /* PROJECT : exFAT & FAT12/16/32 File System */
  20. /* FILE : cache.c */
  21. /* PURPOSE : sdFAT Cache Manager */
  22. /* (FAT Cache & Buffer Cache) */
  23. /* */
  24. /*----------------------------------------------------------------------*/
  25. /* NOTES */
  26. /* */
  27. /* */
  28. /************************************************************************/
  29. #include <linux/swap.h> /* for mark_page_accessed() */
  30. #include <asm/unaligned.h>
  31. #include "sdfat.h"
  32. #include "core.h"
  33. #define DEBUG_HASH_LIST
  34. #define DEBUG_HASH_PREV (0xAAAA5555)
  35. #define DEBUG_HASH_NEXT (0x5555AAAA)
  36. /*----------------------------------------------------------------------*/
  37. /* Global Variable Definitions */
  38. /*----------------------------------------------------------------------*/
  39. /* All buffer structures are protected w/ fsi->v_sem */
  40. /*----------------------------------------------------------------------*/
  41. /* Local Variable Definitions */
  42. /*----------------------------------------------------------------------*/
  43. #define LOCKBIT (0x01)
  44. #define DIRTYBIT (0x02)
  45. #define KEEPBIT (0x04)
  46. /*----------------------------------------------------------------------*/
  47. /* Cache handling function declarations */
  48. /*----------------------------------------------------------------------*/
  49. static cache_ent_t *__fcache_find(struct super_block *sb, u64 sec);
  50. static cache_ent_t *__fcache_get(struct super_block *sb);
  51. static void __fcache_insert_hash(struct super_block *sb, cache_ent_t *bp);
  52. static void __fcache_remove_hash(cache_ent_t *bp);
  53. static cache_ent_t *__dcache_find(struct super_block *sb, u64 sec);
  54. static cache_ent_t *__dcache_get(struct super_block *sb);
  55. static void __dcache_insert_hash(struct super_block *sb, cache_ent_t *bp);
  56. static void __dcache_remove_hash(cache_ent_t *bp);
  57. /*----------------------------------------------------------------------*/
  58. /* Static functions */
  59. /*----------------------------------------------------------------------*/
  60. static void push_to_mru(cache_ent_t *bp, cache_ent_t *list)
  61. {
  62. bp->next = list->next;
  63. bp->prev = list;
  64. list->next->prev = bp;
  65. list->next = bp;
  66. }
  67. static void push_to_lru(cache_ent_t *bp, cache_ent_t *list)
  68. {
  69. bp->prev = list->prev;
  70. bp->next = list;
  71. list->prev->next = bp;
  72. list->prev = bp;
  73. }
  74. static void move_to_mru(cache_ent_t *bp, cache_ent_t *list)
  75. {
  76. bp->prev->next = bp->next;
  77. bp->next->prev = bp->prev;
  78. push_to_mru(bp, list);
  79. }
  80. static void move_to_lru(cache_ent_t *bp, cache_ent_t *list)
  81. {
  82. bp->prev->next = bp->next;
  83. bp->next->prev = bp->prev;
  84. push_to_lru(bp, list);
  85. }
  86. static inline s32 __check_hash_valid(cache_ent_t *bp)
  87. {
  88. #ifdef DEBUG_HASH_LIST
  89. if ((bp->hash.next == (cache_ent_t *)DEBUG_HASH_NEXT) ||
  90. (bp->hash.prev == (cache_ent_t *)DEBUG_HASH_PREV)) {
  91. return -EINVAL;
  92. }
  93. #endif
  94. if ((bp->hash.next == bp) || (bp->hash.prev == bp))
  95. return -EINVAL;
  96. return 0;
  97. }
/*
 * Unlink @bp from its hash chain.  The links are first made self-referential
 * (the "unhashed" state), then, when DEBUG_HASH_LIST is enabled, overwritten
 * with poison values so a double removal can be detected by the callers.
 */
static inline void __remove_from_hash(cache_ent_t *bp)
{
	(bp->hash.prev)->hash.next = bp->hash.next;
	(bp->hash.next)->hash.prev = bp->hash.prev;
	bp->hash.next = bp;
	bp->hash.prev = bp;
#ifdef DEBUG_HASH_LIST
	/* poison the links; checked by __check_hash_valid()/__*_remove_hash() */
	bp->hash.next = (cache_ent_t *)DEBUG_HASH_NEXT;
	bp->hash.prev = (cache_ent_t *)DEBUG_HASH_PREV;
#endif
}
  109. /* Do FAT mirroring (don't sync)
  110. * sec: sector No. in FAT1
  111. * bh: bh of sec.
  112. */
/*
 * Mirror a FAT1 sector write into FAT2 (no sync of FAT2 unless @sync is set).
 * @sec : sector number in FAT1
 * @bh  : buffer head holding the data of @sec
 * Returns 0 on success, -EIO if the mirror write fails.
 * Compiled out (no-op, returns 0) unless CONFIG_SDFAT_FAT_MIRRORING is set.
 */
static inline s32 __fat_copy(struct super_block *sb, u64 sec, struct buffer_head *bh, int sync)
{
#ifdef CONFIG_SDFAT_FAT_MIRRORING
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	u64 sec2;

	/* a distinct FAT2 exists only when its start differs from FAT1's */
	if (fsi->FAT2_start_sector != fsi->FAT1_start_sector) {
		sec2 = sec - fsi->FAT1_start_sector + fsi->FAT2_start_sector;
		/* FAT2 must begin exactly num_FAT_sectors after FAT1 */
		BUG_ON(sec2 != (sec + (u64)fsi->num_FAT_sectors));
		MMSG("BD: fat mirroring (%llu in FAT1, %llu in FAT2)\n", sec, sec2);
		if (write_sect(sb, sec2, bh, sync))
			return -EIO;
	}
#else
	/* DO NOTHING */
#endif
	return 0;
} /* end of __fat_copy */
  130. /*
  131. * returns 1, if bp is flushed
  132. * returns 0, if bp is not dirty
  133. * returns -1, if error occurs
  134. */
/*
 * Write back one FAT cache entry if it is dirty.
 * Returns 1 if @bp was flushed, 0 if @bp was not dirty, -EIO on write error.
 * When @sync is non-zero the underlying bh is synced to the device.
 */
static s32 __fcache_ent_flush(struct super_block *sb, cache_ent_t *bp, u32 sync)
{
	if (!(bp->flag & DIRTYBIT))
		return 0;
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	// Make buffer dirty (XXX: Naive impl.)
	if (write_sect(sb, bp->sec, bp->bh, 0))
		return -EIO;
	/* propagate the write into the FAT2 mirror as well */
	if (__fat_copy(sb, bp->sec, bp->bh, 0))
		return -EIO;
#endif
	bp->flag &= ~(DIRTYBIT);
	if (sync)
		sync_dirty_buffer(bp->bh);
	return 1;
}
/*
 * Invalidate one FAT cache entry: remove it from the hash, release its
 * buffer head, and park it at the LRU end so it is reused first.
 * Does NOT flush; callers must flush beforehand if the entry may be dirty.
 * Always returns 0.
 */
static s32 __fcache_ent_discard(struct super_block *sb, cache_ent_t *bp)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	__fcache_remove_hash(bp);
	bp->sec = ~0;	/* ~0 marks an unused/invalid entry */
	bp->flag = 0;
	if (bp->bh) {
		__brelse(bp->bh);
		bp->bh = NULL;
	}
	move_to_lru(bp, &fsi->fcache.lru_list);
	return 0;
}
/*
 * Return a pointer to the cached data of FAT sector @sec, reading it from
 * the device on a cache miss.  Returns NULL if the block device has become
 * invalid or the read fails; in either case the entry is discarded.
 */
u8 *fcache_getblk(struct super_block *sb, u64 sec)
{
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	u32 page_ra_count = FCACHE_MAX_RA_SIZE >> sb->s_blocksize_bits;

	bp = __fcache_find(sb, sec);
	if (bp) {
		/* hit: but drop the entry if the backing device went away */
		if (bdev_check_bdi_valid(sb)) {
			__fcache_ent_flush(sb, bp, 0);
			__fcache_ent_discard(sb, bp);
			return NULL;
		}
		move_to_mru(bp, &fsi->fcache.lru_list);
		return bp->bh->b_data;
	}

	/* miss: evict an entry, rehash it for @sec */
	bp = __fcache_get(sb);
	if (!__check_hash_valid(bp))
		__fcache_remove_hash(bp);
	bp->sec = sec;
	bp->flag = 0;
	__fcache_insert_hash(sb, bp);

	/* Naive FAT read-ahead (increase I/O unit to page_ra_count) */
	if ((sec & (page_ra_count - 1)) == 0)
		bdev_readahead(sb, sec, (u64)page_ra_count);

	/*
	 * patch 1.2.4 : buffer_head null pointer exception problem.
	 *
	 * When read_sect is failed, fcache should be moved to
	 * EMPTY hash_list and the first of lru_list.
	 */
	if (read_sect(sb, sec, &(bp->bh), 1)) {
		__fcache_ent_discard(sb, bp);
		return NULL;
	}
	return bp->bh->b_data;
}
/*
 * Mark @bp dirty for delayed write-back instead of writing immediately.
 * Returns 0 on success; -ENOTSUPP when delayed dirtying is unavailable
 * (feature compiled out, or volume is exFAT) so the caller must write
 * through synchronously.
 */
static inline int __mark_delayed_dirty(struct super_block *sb, cache_ent_t *bp)
{
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	/* delayed FAT dirtying is only applied to FAT12/16/32 volumes */
	if (fsi->vol_type == EXFAT)
		return -ENOTSUPP;
	bp->flag |= DIRTYBIT;
	return 0;
#else
	return -ENOTSUPP;
#endif
}
/*
 * Commit a modification of the cached FAT sector @sec.
 * Prefers delayed dirtying; otherwise writes the sector (and its FAT2
 * mirror) through immediately.  The sector must already be in the cache:
 * a miss is treated as a filesystem error.
 * Returns 0 on success, -EIO on lookup or write failure.
 */
s32 fcache_modify(struct super_block *sb, u64 sec)
{
	cache_ent_t *bp;

	bp = __fcache_find(sb, sec);
	if (!bp) {
		sdfat_fs_error(sb, "Can`t find fcache (sec 0x%016llx)", sec);
		return -EIO;
	}

	/* delayed path: just mark dirty and return */
	if (!__mark_delayed_dirty(sb, bp))
		return 0;

	/* write-through path */
	if (write_sect(sb, sec, bp->bh, 0))
		return -EIO;
	if (__fat_copy(sb, sec, bp->bh, 0))
		return -EIO;
	return 0;
}
  228. /*======================================================================*/
  229. /* Cache Initialization Functions */
  230. /*======================================================================*/
  231. s32 meta_cache_init(struct super_block *sb)
  232. {
  233. FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
  234. s32 i;
  235. /* LRU list */
  236. fsi->fcache.lru_list.next = &fsi->fcache.lru_list;
  237. fsi->fcache.lru_list.prev = fsi->fcache.lru_list.next;
  238. for (i = 0; i < FAT_CACHE_SIZE; i++) {
  239. fsi->fcache.pool[i].sec = ~0;
  240. fsi->fcache.pool[i].flag = 0;
  241. fsi->fcache.pool[i].bh = NULL;
  242. fsi->fcache.pool[i].prev = NULL;
  243. fsi->fcache.pool[i].next = NULL;
  244. push_to_mru(&(fsi->fcache.pool[i]), &fsi->fcache.lru_list);
  245. }
  246. fsi->dcache.lru_list.next = &fsi->dcache.lru_list;
  247. fsi->dcache.lru_list.prev = fsi->dcache.lru_list.next;
  248. fsi->dcache.keep_list.next = &fsi->dcache.keep_list;
  249. fsi->dcache.keep_list.prev = fsi->dcache.keep_list.next;
  250. // Initially, all the BUF_CACHEs are in the LRU list
  251. for (i = 0; i < BUF_CACHE_SIZE; i++) {
  252. fsi->dcache.pool[i].sec = ~0;
  253. fsi->dcache.pool[i].flag = 0;
  254. fsi->dcache.pool[i].bh = NULL;
  255. fsi->dcache.pool[i].prev = NULL;
  256. fsi->dcache.pool[i].next = NULL;
  257. push_to_mru(&(fsi->dcache.pool[i]), &fsi->dcache.lru_list);
  258. }
  259. /* HASH list */
  260. for (i = 0; i < FAT_CACHE_HASH_SIZE; i++) {
  261. fsi->fcache.hash_list[i].sec = ~0;
  262. fsi->fcache.hash_list[i].hash.next = &(fsi->fcache.hash_list[i]);
  263. ;
  264. fsi->fcache.hash_list[i].hash.prev = fsi->fcache.hash_list[i].hash.next;
  265. }
  266. for (i = 0; i < FAT_CACHE_SIZE; i++)
  267. __fcache_insert_hash(sb, &(fsi->fcache.pool[i]));
  268. for (i = 0; i < BUF_CACHE_HASH_SIZE; i++) {
  269. fsi->dcache.hash_list[i].sec = ~0;
  270. fsi->dcache.hash_list[i].hash.next = &(fsi->dcache.hash_list[i]);
  271. fsi->dcache.hash_list[i].hash.prev = fsi->dcache.hash_list[i].hash.next;
  272. }
  273. for (i = 0; i < BUF_CACHE_SIZE; i++)
  274. __dcache_insert_hash(sb, &(fsi->dcache.pool[i]));
  275. return 0;
  276. }
  277. s32 meta_cache_shutdown(struct super_block *sb)
  278. {
  279. return 0;
  280. }
  281. /*======================================================================*/
  282. /* FAT Read/Write Functions */
  283. /*======================================================================*/
/*
 * Flush and release every FAT cache entry (used at unmount/shutdown).
 * Each entry is written back if dirty, then invalidated and its bh dropped.
 * Returns 0, or the last flush error encountered (the loop keeps going so
 * all buffer heads are still released).
 */
s32 fcache_release_all(struct super_block *sb)
{
	s32 ret = 0;
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	s32 dirtycnt = 0;

	bp = fsi->fcache.lru_list.next;
	while (bp != &fsi->fcache.lru_list) {
		s32 ret_tmp = __fcache_ent_flush(sb, bp, 0);

		if (ret_tmp < 0)
			ret = ret_tmp;
		else
			dirtycnt += ret_tmp;	/* 1 if flushed, 0 if clean */

		/* invalidate regardless of flush outcome */
		bp->sec = ~0;
		bp->flag = 0;
		if (bp->bh) {
			__brelse(bp->bh);
			bp->bh = NULL;
		}
		bp = bp->next;	/* entry stays linked; only its payload is reset */
	}

	DMSG("BD:Release / dirty fat cache: %d (err:%d)\n", dirtycnt, ret);
	return ret;
}
  308. /* internal DIRTYBIT marked => bh dirty */
/* internal DIRTYBIT marked => bh dirty */
/*
 * Write back every dirty FAT cache entry.  Entries remain cached.
 * @sync non-zero additionally syncs each bh to the device.
 * Returns 0 on success; stops and returns the error on the first failure.
 */
s32 fcache_flush(struct super_block *sb, u32 sync)
{
	s32 ret = 0;
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	s32 dirtycnt = 0;

	bp = fsi->fcache.lru_list.next;
	while (bp != &fsi->fcache.lru_list) {
		ret = __fcache_ent_flush(sb, bp, sync);
		if (ret < 0)
			break;
		dirtycnt += ret;	/* 1 per flushed entry */
		bp = bp->next;
	}

	MMSG("BD: flush / dirty fat cache: %d (err:%d)\n", dirtycnt, ret);
	return ret;
}
/*
 * Look up FAT sector @sec in the fcache hash table.
 * On a hit the entry's bh is touched (page marked accessed) and the entry
 * is returned; on a miss, NULL.
 */
static cache_ent_t *__fcache_find(struct super_block *sb, u64 sec)
{
	s32 off;
	cache_ent_t *bp, *hp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	/* hash: sector plus its cluster index, masked to the bucket count */
	off = (sec + (sec >> fsi->sect_per_clus_bits)) & (FAT_CACHE_HASH_SIZE - 1);

	hp = &(fsi->fcache.hash_list[off]);
	for (bp = hp->hash.next; bp != hp; bp = bp->hash.next) {
		if (bp->sec == sec) {
			/*
			 * patch 1.2.4 : for debugging
			 */
			WARN(!bp->bh, "[SDFAT] fcache has no bh. "
				      "It will make system panic.\n");
			touch_buffer(bp->bh);
			return bp;
		}
	}
	return NULL;
}
/*
 * Pick a victim FAT cache entry for reuse: the least-recently-used clean
 * entry.  If the whole list is dirty ("flooding"), flush everything and
 * retry from the LRU end.  The chosen entry is moved to the MRU position
 * before being returned.
 */
static cache_ent_t *__fcache_get(struct super_block *sb)
{
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	bp = fsi->fcache.lru_list.prev;
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	while (bp->flag & DIRTYBIT) {
		cache_ent_t *bp_prev = bp->prev;

		bp = bp_prev;
		if (bp == &fsi->fcache.lru_list) {
			/* wrapped around: every entry is dirty */
			DMSG("BD: fat cache flooding\n");
			fcache_flush(sb, 0);	// flush all dirty FAT caches
			bp = fsi->fcache.lru_list.prev;
		}
	}
#endif
	// if (bp->flag & DIRTYBIT)
	//	sync_dirty_buffer(bp->bh);
	move_to_mru(bp, &fsi->fcache.lru_list);
	return bp;
}
  367. static void __fcache_insert_hash(struct super_block *sb, cache_ent_t *bp)
  368. {
  369. s32 off;
  370. cache_ent_t *hp;
  371. FS_INFO_T *fsi;
  372. fsi = &(SDFAT_SB(sb)->fsi);
  373. off = (bp->sec + (bp->sec >> fsi->sect_per_clus_bits)) & (FAT_CACHE_HASH_SIZE-1);
  374. hp = &(fsi->fcache.hash_list[off]);
  375. bp->hash.next = hp->hash.next;
  376. bp->hash.prev = hp;
  377. hp->hash.next->hash.prev = bp;
  378. hp->hash.next = bp;
  379. }
/*
 * Remove @bp from its fcache hash chain.
 * With DEBUG_HASH_LIST, a double removal is detected via the poison values
 * written by __remove_from_hash() and reported instead of corrupting the
 * chain.  Removing a still-dirty entry triggers a WARN.
 */
static void __fcache_remove_hash(cache_ent_t *bp)
{
#ifdef DEBUG_HASH_LIST
	if ((bp->hash.next == (cache_ent_t *)DEBUG_HASH_NEXT) ||
	    (bp->hash.prev == (cache_ent_t *)DEBUG_HASH_PREV)) {
		EMSG("%s: FATAL: tried to remove already-removed-cache-entry"
		     "(bp:%p)\n", __func__, bp);
		return;
	}
#endif
	WARN_ON(bp->flag & DIRTYBIT);
	__remove_from_hash(bp);
}
  393. /*======================================================================*/
  394. /* Buffer Read/Write Functions */
  395. /*======================================================================*/
  396. /* Read-ahead a cluster */
/*
 * Read-ahead a cluster's worth of sectors starting at @sec, capped at
 * DCACHE_MAX_RA_SIZE and never below one page.  Skipped entirely for
 * single-sector clusters, and only issued when @sec itself is not already
 * up to date in the block layer.
 * Returns 0, or -EIO when @sec lies before the data area.
 */
s32 dcache_readahead(struct super_block *sb, u64 sec)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	struct buffer_head *bh;
	u32 max_ra_count = DCACHE_MAX_RA_SIZE >> sb->s_blocksize_bits;
	u32 page_ra_count = PAGE_SIZE >> sb->s_blocksize_bits;
	u32 adj_ra_count = max(fsi->sect_per_clus, page_ra_count);
	u32 ra_count = min(adj_ra_count, max_ra_count);

	/* Read-ahead is not required */
	if (fsi->sect_per_clus == 1)
		return 0;

	if (sec < fsi->data_start_sector) {
		EMSG("BD: %s: requested sector is invalid(sect:%llu, root:%llu)\n",
		     __func__, sec, fsi->data_start_sector);
		return -EIO;
	}

	/* Not sector aligned with ra_count, resize ra_count to page size */
	if ((sec - fsi->data_start_sector) & (ra_count - 1))
		ra_count = page_ra_count;

	bh = sb_find_get_block(sb, sec);
	if (!bh || !buffer_uptodate(bh))
		bdev_readahead(sb, sec, (u64)ra_count);
	brelse(bh);	/* brelse(NULL) is a no-op */

	return 0;
}
  422. /*
  423. * returns 1, if bp is flushed
  424. * returns 0, if bp is not dirty
  425. * returns -1, if error occurs
  426. */
/*
 * Write back one buffer cache entry if it is dirty.
 * Returns 1 if @bp was flushed, 0 if @bp was not dirty, -EIO on write error.
 * @sync non-zero additionally syncs the bh to the device.
 * Unlike __fcache_ent_flush(), no FAT mirroring is involved here.
 */
static s32 __dcache_ent_flush(struct super_block *sb, cache_ent_t *bp, u32 sync)
{
	if (!(bp->flag & DIRTYBIT))
		return 0;
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	// Make buffer dirty (XXX: Naive impl.)
	if (write_sect(sb, bp->sec, bp->bh, 0))
		return -EIO;
#endif
	bp->flag &= ~(DIRTYBIT);
	if (sync)
		sync_dirty_buffer(bp->bh);
	return 1;
}
/*
 * Invalidate one buffer cache entry: remove it from the hash, release its
 * buffer head, and park it at the LRU end for quick reuse.
 * Does NOT flush; callers must flush beforehand if the entry may be dirty.
 * Always returns 0.
 */
static s32 __dcache_ent_discard(struct super_block *sb, cache_ent_t *bp)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	MMSG("%s : bp[%p] (sec:%016llx flag:%08x bh:%p) list(prev:%p next:%p) "
	     "hash(prev:%p next:%p)\n", __func__,
	     bp, bp->sec, bp->flag, bp->bh, bp->prev, bp->next,
	     bp->hash.prev, bp->hash.next);

	__dcache_remove_hash(bp);
	bp->sec = ~0;	/* ~0 marks an unused/invalid entry */
	bp->flag = 0;
	if (bp->bh) {
		__brelse(bp->bh);
		bp->bh = NULL;
	}
	move_to_lru(bp, &fsi->dcache.lru_list);
	return 0;
}
/*
 * Return a pointer to the cached data of directory-entry sector @sec,
 * reading it from the device on a cache miss.  Returns NULL if the block
 * device has become invalid or the read fails; the entry is then discarded.
 */
u8 *dcache_getblk(struct super_block *sb, u64 sec)
{
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	bp = __dcache_find(sb, sec);
	if (bp) {
		/* hit: drop the entry if the backing device went away */
		if (bdev_check_bdi_valid(sb)) {
			MMSG("%s: found cache(%p, sect:%llu). But invalid BDI\n"
			     , __func__, bp, sec);
			__dcache_ent_flush(sb, bp, 0);
			__dcache_ent_discard(sb, bp);
			return NULL;
		}

		if (!(bp->flag & KEEPBIT))	// already in keep list
			move_to_mru(bp, &fsi->dcache.lru_list);
		return bp->bh->b_data;
	}

	/* miss: evict an entry, rehash it for @sec */
	bp = __dcache_get(sb);
	if (!__check_hash_valid(bp))
		__dcache_remove_hash(bp);
	bp->sec = sec;
	bp->flag = 0;
	__dcache_insert_hash(sb, bp);

	/* on read failure the entry must not stay hashed for @sec */
	if (read_sect(sb, sec, &(bp->bh), 1)) {
		__dcache_ent_discard(sb, bp);
		return NULL;
	}
	return bp->bh->b_data;
}
/*
 * Commit a modification of the cached directory-entry sector @sec and mark
 * the superblock dirty.  On FAT volumes with delayed meta dirtying, the
 * entry is only flagged dirty; otherwise (or on exFAT) it is written
 * through immediately.
 * Returns 0 on success, -EIO on cache miss or write failure.
 */
s32 dcache_modify(struct super_block *sb, u64 sec)
{
	s32 ret = -EIO;
	cache_ent_t *bp;

	set_sb_dirty(sb);

	bp = __dcache_find(sb, sec);
	if (unlikely(!bp)) {
		sdfat_fs_error(sb, "Can`t find dcache (sec 0x%016llx)", sec);
		return -EIO;
	}

#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	/* delayed path: FAT12/16/32 only */
	if (SDFAT_SB(sb)->fsi.vol_type != EXFAT) {
		bp->flag |= DIRTYBIT;
		return 0;
	}
#endif
	/* write-through path */
	ret = write_sect(sb, sec, bp->bh, 0);
	if (ret) {
		DMSG("%s : failed to modify buffer(err:%d, sec:%llu, bp:0x%p)\n",
		     __func__, ret, sec, bp);
	}
	return ret;
}
  510. s32 dcache_lock(struct super_block *sb, u64 sec)
  511. {
  512. cache_ent_t *bp;
  513. bp = __dcache_find(sb, sec);
  514. if (likely(bp)) {
  515. bp->flag |= LOCKBIT;
  516. return 0;
  517. }
  518. EMSG("%s : failed to lock buffer(sec:%llu, bp:0x%p)\n", __func__, sec, bp);
  519. return -EIO;
  520. }
  521. s32 dcache_unlock(struct super_block *sb, u64 sec)
  522. {
  523. cache_ent_t *bp;
  524. bp = __dcache_find(sb, sec);
  525. if (likely(bp)) {
  526. bp->flag &= ~(LOCKBIT);
  527. return 0;
  528. }
  529. EMSG("%s : failed to unlock buffer (sec:%llu, bp:0x%p)\n", __func__, sec, bp);
  530. return -EIO;
  531. }
/*
 * Release the cache entry for sector @sec: write it back if dirty (delayed
 * mode only), invalidate it, drop its bh, and park it at the LRU end.
 * Note: the entry stays on its hash chain; a later reuse removes it via
 * __check_hash_valid()/__dcache_remove_hash().
 * Returns 0 on success, -ENOENT on cache miss, -EIO on write failure.
 */
s32 dcache_release(struct super_block *sb, u64 sec)
{
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	bp = __dcache_find(sb, sec);
	if (unlikely(!bp))
		return -ENOENT;

#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	if (bp->flag & DIRTYBIT) {
		if (write_sect(sb, bp->sec, bp->bh, 0))
			return -EIO;
	}
#endif
	bp->sec = ~0;	/* ~0 marks an unused/invalid entry */
	bp->flag = 0;
	if (bp->bh) {
		__brelse(bp->bh);
		bp->bh = NULL;
	}
	move_to_lru(bp, &fsi->dcache.lru_list);
	return 0;
}
/*
 * Flush and release every buffer cache entry (used at unmount/shutdown).
 * Keep-list entries are first merged back into the LRU list so a single
 * pass covers everything; each dirty entry is written back, then all
 * entries are invalidated and their bhs dropped.
 * Returns 0, or -EIO if any write failed (the loop keeps going so all
 * buffer heads are still released).
 */
s32 dcache_release_all(struct super_block *sb)
{
	s32 ret = 0;
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	s32 dirtycnt = 0;

	/* Connect list elements:
	 * LRU list : (A - B - ... - bp_front) + (bp_first + ... + bp_last)
	 */
	while (fsi->dcache.keep_list.prev != &fsi->dcache.keep_list) {
		cache_ent_t *bp_keep = fsi->dcache.keep_list.prev;
		// bp_keep->flag &= ~(KEEPBIT); // Will be 0-ed later
		move_to_mru(bp_keep, &fsi->dcache.lru_list);
	}

	bp = fsi->dcache.lru_list.next;
	while (bp != &fsi->dcache.lru_list) {
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
		if (bp->flag & DIRTYBIT) {
			dirtycnt++;
			if (write_sect(sb, bp->sec, bp->bh, 0))
				ret = -EIO;
		}
#endif
		/* invalidate regardless of flush outcome */
		bp->sec = ~0;
		bp->flag = 0;
		if (bp->bh) {
			__brelse(bp->bh);
			bp->bh = NULL;
		}
		bp = bp->next;	/* entry stays linked; only its payload is reset */
	}

	DMSG("BD:Release / dirty buf cache: %d (err:%d)", dirtycnt, ret);
	return ret;
}
/*
 * Write back every dirty buffer cache entry.  Keep-list entries are merged
 * back into the LRU list (their KEEPBIT cleared) so one pass flushes all.
 * Entries remain cached.  @sync non-zero additionally syncs each bh.
 * Returns 0 on success; stops and returns -EIO on the first write failure.
 */
s32 dcache_flush(struct super_block *sb, u32 sync)
{
	s32 ret = 0;
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	s32 dirtycnt = 0;
	s32 keepcnt = 0;

	/* Connect list elements:
	 * LRU list : (A - B - ... - bp_front) + (bp_first + ... + bp_last)
	 */
	while (fsi->dcache.keep_list.prev != &fsi->dcache.keep_list) {
		cache_ent_t *bp_keep = fsi->dcache.keep_list.prev;

		bp_keep->flag &= ~(KEEPBIT); // Will be 0-ed later
		move_to_mru(bp_keep, &fsi->dcache.lru_list);
		keepcnt++;
	}

	bp = fsi->dcache.lru_list.next;
	while (bp != &fsi->dcache.lru_list) {
		if (bp->flag & DIRTYBIT) {
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
			// Make buffer dirty (XXX: Naive impl.)
			if (write_sect(sb, bp->sec, bp->bh, 0)) {
				ret = -EIO;
				break;
			}
#endif
			bp->flag &= ~(DIRTYBIT);
			dirtycnt++;
			if (sync != 0)
				sync_dirty_buffer(bp->bh);
		}
		bp = bp->next;
	}

	MMSG("BD: flush / dirty dentry cache: %d (%d from keeplist, err:%d)\n",
	     dirtycnt, keepcnt, ret);
	return ret;
}
/*
 * Look up sector @sec in the dcache hash table.
 * On a hit the entry's bh is touched (page marked accessed) and the entry
 * is returned; on a miss, NULL.
 */
static cache_ent_t *__dcache_find(struct super_block *sb, u64 sec)
{
	s32 off;
	cache_ent_t *bp, *hp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	/* same hash form as the fcache: sector + cluster index */
	off = (sec + (sec >> fsi->sect_per_clus_bits)) & (BUF_CACHE_HASH_SIZE - 1);

	hp = &(fsi->dcache.hash_list[off]);
	for (bp = hp->hash.next; bp != hp; bp = bp->hash.next) {
		if (bp->sec == sec) {
			touch_buffer(bp->bh);
			return bp;
		}
	}
	return NULL;
}
/*
 * Pick a victim buffer cache entry for reuse: the least-recently-used entry
 * that is neither dirty nor locked.  In delayed-dirty mode, dirty entries
 * encountered on the way are parked on the keep list (KEEPBIT set) so they
 * survive until the next flush; if every entry is dirty/locked ("flooding"),
 * everything is flushed and the scan restarts from the LRU end.  The chosen
 * entry is moved to the MRU position before being returned.
 */
static cache_ent_t *__dcache_get(struct super_block *sb)
{
	cache_ent_t *bp;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);

	bp = fsi->dcache.lru_list.prev;
#ifdef CONFIG_SDFAT_DELAYED_META_DIRTY
	while (bp->flag & (DIRTYBIT | LOCKBIT)) {
		cache_ent_t *bp_prev = bp->prev; // hold prev

		if (bp->flag & DIRTYBIT) {
			MMSG("BD: Buf cache => Keep list\n");
			bp->flag |= KEEPBIT;
			move_to_mru(bp, &fsi->dcache.keep_list);
		}
		bp = bp_prev;

		/* If all dcaches are dirty */
		if (bp == &fsi->dcache.lru_list) {
			DMSG("BD: buf cache flooding\n");
			dcache_flush(sb, 0);
			bp = fsi->dcache.lru_list.prev;
		}
	}
#else
	/* without delayed dirtying only locked entries are skipped */
	while (bp->flag & LOCKBIT)
		bp = bp->prev;
#endif
	// if (bp->flag & DIRTYBIT)
	//	sync_dirty_buffer(bp->bh);
	move_to_mru(bp, &fsi->dcache.lru_list);
	return bp;
}
  670. static void __dcache_insert_hash(struct super_block *sb, cache_ent_t *bp)
  671. {
  672. s32 off;
  673. cache_ent_t *hp;
  674. FS_INFO_T *fsi;
  675. fsi = &(SDFAT_SB(sb)->fsi);
  676. off = (bp->sec + (bp->sec >> fsi->sect_per_clus_bits)) & (BUF_CACHE_HASH_SIZE-1);
  677. hp = &(fsi->dcache.hash_list[off]);
  678. bp->hash.next = hp->hash.next;
  679. bp->hash.prev = hp;
  680. hp->hash.next->hash.prev = bp;
  681. hp->hash.next = bp;
  682. }
/*
 * Remove @bp from its dcache hash chain.
 * With DEBUG_HASH_LIST, a double removal is detected via the poison values
 * written by __remove_from_hash() and reported instead of corrupting the
 * chain.  Removing a still-dirty entry triggers a WARN.
 */
static void __dcache_remove_hash(cache_ent_t *bp)
{
#ifdef DEBUG_HASH_LIST
	if ((bp->hash.next == (cache_ent_t *)DEBUG_HASH_NEXT) ||
	    (bp->hash.prev == (cache_ent_t *)DEBUG_HASH_PREV)) {
		EMSG("%s: FATAL: tried to remove already-removed-cache-entry"
		     "(bp:%p)\n", __func__, bp);
		return;
	}
#endif
	WARN_ON(bp->flag & DIRTYBIT);
	__remove_from_hash(bp);
}
  696. /* end of cache.c */