/* fs/fat/fatent.c */
  1. /*
  2. * Copyright (C) 2004, OGAWA Hirofumi
  3. * Released under GPL v2.
  4. */
  5. #include <linux/blkdev.h>
  6. #include "fat.h"
/*
 * Per-variant (FAT12/16/32) FAT entry access primitives.  One static
 * instance per variant is selected at mount time by fat_ent_access_init().
 */
struct fatent_operations {
	/* map a cluster number to its byte offset and on-disk block */
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	/* aim fatent->u at the entry inside the already-read buffer(s) */
	void (*ent_set_ptr)(struct fat_entry *, int);
	/* read the block(s) containing the entry into fatent->bhs */
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	/* decode the entry's next-cluster value */
	int (*ent_get)(struct fat_entry *);
	/* store a new next-cluster value and dirty the buffer(s) */
	void (*ent_put)(struct fat_entry *, int);
	/* advance to the next entry; 0 when the cached block is exhausted */
	int (*ent_next)(struct fat_entry *);
};
/*
 * FAT12 entries are 12 bits, so neighbouring entries share a byte;
 * fat12_ent_get()/fat12_ent_put() serialize their read-modify-write here.
 */
static DEFINE_SPINLOCK(fat12_entry_lock);
  17. static void fat12_ent_blocknr(struct super_block *sb, int entry,
  18. int *offset, sector_t *blocknr)
  19. {
  20. struct msdos_sb_info *sbi = MSDOS_SB(sb);
  21. int bytes = entry + (entry >> 1);
  22. WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
  23. *offset = bytes & (sb->s_blocksize - 1);
  24. *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
  25. }
  26. static void fat_ent_blocknr(struct super_block *sb, int entry,
  27. int *offset, sector_t *blocknr)
  28. {
  29. struct msdos_sb_info *sbi = MSDOS_SB(sb);
  30. int bytes = (entry << sbi->fatent_shift);
  31. WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
  32. *offset = bytes & (sb->s_blocksize - 1);
  33. *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
  34. }
  35. static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
  36. {
  37. struct buffer_head **bhs = fatent->bhs;
  38. if (fatent->nr_bhs == 1) {
  39. WARN_ON(offset >= (bhs[0]->b_size - 1));
  40. fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
  41. fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
  42. } else {
  43. WARN_ON(offset != (bhs[0]->b_size - 1));
  44. fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
  45. fatent->u.ent12_p[1] = bhs[1]->b_data;
  46. }
  47. }
  48. static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
  49. {
  50. WARN_ON(offset & (2 - 1));
  51. fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
  52. }
  53. static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
  54. {
  55. WARN_ON(offset & (4 - 1));
  56. fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
  57. }
  58. static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
  59. int offset, sector_t blocknr)
  60. {
  61. struct buffer_head **bhs = fatent->bhs;
  62. WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
  63. fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
  64. bhs[0] = sb_bread(sb, blocknr);
  65. if (!bhs[0])
  66. goto err;
  67. if ((offset + 1) < sb->s_blocksize)
  68. fatent->nr_bhs = 1;
  69. else {
  70. /* This entry is block boundary, it needs the next block */
  71. blocknr++;
  72. bhs[1] = sb_bread(sb, blocknr);
  73. if (!bhs[1])
  74. goto err_brelse;
  75. fatent->nr_bhs = 2;
  76. }
  77. fat12_ent_set_ptr(fatent, offset);
  78. return 0;
  79. err_brelse:
  80. brelse(bhs[0]);
  81. err:
  82. fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
  83. return -EIO;
  84. }
  85. static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
  86. int offset, sector_t blocknr)
  87. {
  88. const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
  89. WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
  90. fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
  91. fatent->bhs[0] = sb_bread(sb, blocknr);
  92. if (!fatent->bhs[0]) {
  93. fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
  94. (llu)blocknr);
  95. return -EIO;
  96. }
  97. fatent->nr_bhs = 1;
  98. ops->ent_set_ptr(fatent, offset);
  99. return 0;
  100. }
  101. static int fat12_ent_get(struct fat_entry *fatent)
  102. {
  103. u8 **ent12_p = fatent->u.ent12_p;
  104. int next;
  105. spin_lock(&fat12_entry_lock);
  106. if (fatent->entry & 1)
  107. next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
  108. else
  109. next = (*ent12_p[1] << 8) | *ent12_p[0];
  110. spin_unlock(&fat12_entry_lock);
  111. next &= 0x0fff;
  112. if (next >= BAD_FAT12)
  113. next = FAT_ENT_EOF;
  114. return next;
  115. }
  116. static int fat16_ent_get(struct fat_entry *fatent)
  117. {
  118. int next = le16_to_cpu(*fatent->u.ent16_p);
  119. WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
  120. if (next >= BAD_FAT16)
  121. next = FAT_ENT_EOF;
  122. return next;
  123. }
  124. static int fat32_ent_get(struct fat_entry *fatent)
  125. {
  126. int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
  127. WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
  128. if (next >= BAD_FAT32)
  129. next = FAT_ENT_EOF;
  130. return next;
  131. }
  132. static void fat12_ent_put(struct fat_entry *fatent, int new)
  133. {
  134. u8 **ent12_p = fatent->u.ent12_p;
  135. if (new == FAT_ENT_EOF)
  136. new = EOF_FAT12;
  137. spin_lock(&fat12_entry_lock);
  138. if (fatent->entry & 1) {
  139. *ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
  140. *ent12_p[1] = new >> 4;
  141. } else {
  142. *ent12_p[0] = new & 0xff;
  143. *ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
  144. }
  145. spin_unlock(&fat12_entry_lock);
  146. mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
  147. if (fatent->nr_bhs == 2)
  148. mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
  149. }
  150. static void fat16_ent_put(struct fat_entry *fatent, int new)
  151. {
  152. if (new == FAT_ENT_EOF)
  153. new = EOF_FAT16;
  154. *fatent->u.ent16_p = cpu_to_le16(new);
  155. mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
  156. }
  157. static void fat32_ent_put(struct fat_entry *fatent, int new)
  158. {
  159. WARN_ON(new & 0xf0000000);
  160. new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
  161. *fatent->u.ent32_p = cpu_to_le32(new);
  162. mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
  163. }
/*
 * Advance a FAT12 entry to the next cluster slot within the cached
 * block(s).  Returns 1 if the new entry's bytes are still covered by
 * the cached buffer(s), 0 when the caller must read the next block.
 */
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	/* next entry's second byte: 1 or 2 bytes on, depending on parity */
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 1)));
		/* still room for both bytes inside bhs[0]? */
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		/* previous entry straddled into bhs[1]; bhs[0] is done */
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					     (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	/* exhausted: invalidate pointers so misuse is caught */
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}
  195. static int fat16_ent_next(struct fat_entry *fatent)
  196. {
  197. const struct buffer_head *bh = fatent->bhs[0];
  198. fatent->entry++;
  199. if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
  200. fatent->u.ent16_p++;
  201. return 1;
  202. }
  203. fatent->u.ent16_p = NULL;
  204. return 0;
  205. }
  206. static int fat32_ent_next(struct fat_entry *fatent)
  207. {
  208. const struct buffer_head *bh = fatent->bhs[0];
  209. fatent->entry++;
  210. if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
  211. fatent->u.ent32_p++;
  212. return 1;
  213. }
  214. fatent->u.ent32_p = NULL;
  215. return 0;
  216. }
/* FAT12: 1.5-byte entries that may straddle block boundaries. */
static const struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};
/* FAT16: 2-byte entries; shares the generic blocknr/bread helpers. */
static const struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};
/* FAT32: 4-byte entries; shares the generic blocknr/bread helpers. */
static const struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};
/* Acquire the per-superblock mutex serializing FAT table updates. */
static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}
/* Release the per-superblock FAT mutex taken by lock_fat(). */
static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}
/*
 * Mount-time setup: initialize the FAT mutex and select the entry-size
 * shift and operations table matching the FAT variant.
 */
void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	switch (sbi->fat_bits) {
	case 32:
		sbi->fatent_shift = 2;	/* 4 bytes per entry */
		sbi->fatent_ops = &fat32_ops;
		break;
	case 16:
		sbi->fatent_shift = 1;	/* 2 bytes per entry */
		sbi->fatent_ops = &fat16_ops;
		break;
	case 12:
		sbi->fatent_shift = -1;	/* unused: FAT12 computes 1.5 bytes itself */
		sbi->fatent_ops = &fat12_ops;
		break;
	}
}
  268. static void mark_fsinfo_dirty(struct super_block *sb)
  269. {
  270. struct msdos_sb_info *sbi = MSDOS_SB(sb);
  271. if (sb->s_flags & MS_RDONLY || sbi->fat_bits != 32)
  272. return;
  273. __mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
  274. }
/*
 * If the buffers already cached in @fatent cover the entry located at
 * @offset/@blocknr, re-aim the entry pointer and return 1; return 0 when
 * the caller must read fresh blocks.  For FAT12 this also handles the
 * straddling case, where a second (adjacent) buffer is required.
 */
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Is this fatent's blocks including this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (sbi->fat_bits == 12) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				/* second buffer no longer needed */
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}
  303. int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
  304. {
  305. struct super_block *sb = inode->i_sb;
  306. struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
  307. const struct fatent_operations *ops = sbi->fatent_ops;
  308. int err, offset;
  309. sector_t blocknr;
  310. if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
  311. fatent_brelse(fatent);
  312. fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
  313. return -EIO;
  314. }
  315. fatent_set_entry(fatent, entry);
  316. ops->ent_blocknr(sb, entry, &offset, &blocknr);
  317. if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
  318. fatent_brelse(fatent);
  319. err = ops->ent_bread(sb, fatent, offset, blocknr);
  320. if (err)
  321. return err;
  322. }
  323. return ops->ent_get(fatent);
  324. }
  325. /* FIXME: We can write the blocks as more big chunk. */
  326. static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
  327. int nr_bhs)
  328. {
  329. struct msdos_sb_info *sbi = MSDOS_SB(sb);
  330. struct buffer_head *c_bh;
  331. int err, n, copy;
  332. err = 0;
  333. for (copy = 1; copy < sbi->fats; copy++) {
  334. sector_t backup_fat = sbi->fat_length * copy;
  335. for (n = 0; n < nr_bhs; n++) {
  336. c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
  337. if (!c_bh) {
  338. err = -ENOMEM;
  339. goto error;
  340. }
  341. memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
  342. set_buffer_uptodate(c_bh);
  343. mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
  344. if (sb->s_flags & MS_SYNCHRONOUS)
  345. err = sync_dirty_buffer(c_bh);
  346. brelse(c_bh);
  347. if (err)
  348. goto error;
  349. }
  350. }
  351. error:
  352. return err;
  353. }
  354. int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
  355. int new, int wait)
  356. {
  357. struct super_block *sb = inode->i_sb;
  358. const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
  359. int err;
  360. ops->ent_put(fatent, new);
  361. if (wait) {
  362. err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
  363. if (err)
  364. return err;
  365. }
  366. return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
  367. }
  368. static inline int fat_ent_next(struct msdos_sb_info *sbi,
  369. struct fat_entry *fatent)
  370. {
  371. if (sbi->fatent_ops->ent_next(fatent)) {
  372. if (fatent->entry < sbi->max_cluster)
  373. return 1;
  374. }
  375. return 0;
  376. }
  377. static inline int fat_ent_read_block(struct super_block *sb,
  378. struct fat_entry *fatent)
  379. {
  380. const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
  381. sector_t blocknr;
  382. int offset;
  383. fatent_brelse(fatent);
  384. ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
  385. return ops->ent_bread(sb, fatent, offset, blocknr);
  386. }
  387. static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
  388. struct fat_entry *fatent)
  389. {
  390. int n, i;
  391. for (n = 0; n < fatent->nr_bhs; n++) {
  392. for (i = 0; i < *nr_bhs; i++) {
  393. if (fatent->bhs[n] == bhs[i])
  394. break;
  395. }
  396. if (i == *nr_bhs) {
  397. get_bh(fatent->bhs[n]);
  398. bhs[i] = fatent->bhs[n];
  399. (*nr_bhs)++;
  400. }
  401. }
  402. }
/*
 * Allocate @nr_cluster free clusters, chaining them together (each new
 * entry is first marked EOF, then the previous entry is pointed at it)
 * and recording the chosen cluster numbers in @cluster[].  Scanning
 * resumes after sbi->prev_free and wraps around.  Returns 0 or a
 * negative error; on failure any partially built chain is freed.
 */
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */

	lock_fat(sbi);
	/* fast-path ENOSPC when the cached free count is authoritative */
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	/* resume scanning just past the most recent allocation */
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;	/* wrap around */
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() gets ref-count of bhs,
				 * so we can still use the prev_ent.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* roll back a partially built chain on error */
	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}
/*
 * Free the cluster chain starting at @cluster: mark each entry
 * FAT_ENT_FREE until FAT_ENT_EOF, optionally issuing batched discards
 * for contiguous runs, and mirror the dirtied blocks to the backup
 * FATs.  Returns 0 or a negative error.
 */
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			/* a chain must end with EOF, never a free entry */
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);

				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			/* bhs[] full: flush/mirror and release before more */
			if (sb->s_flags & MS_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & MS_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);
  551. /* 128kb is the whole sectors for FAT12 and FAT16 */
  552. #define FAT_READA_SIZE (128 * 1024)
  553. static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
  554. unsigned long reada_blocks)
  555. {
  556. const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
  557. sector_t blocknr;
  558. int i, offset;
  559. ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
  560. for (i = 0; i < reada_blocks; i++)
  561. sb_breadahead(sb, blocknr + i);
  562. }
/*
 * Walk the whole FAT (with readahead in FAT_READA_SIZE windows) counting
 * FAT_ENT_FREE entries, and cache the result in sbi->free_clusters.
 * Skips the scan when the cached count is already valid.  Returns 0 or
 * a negative I/O error.
 */
int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	unsigned long reada_blocks, reada_mask, cur_block;
	int err = 0, free;

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;	/* cached count still trustworthy */

	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
	reada_mask = reada_blocks - 1;
	cur_block = 0;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		if ((cur_block & reada_mask) == 0) {
			/* clamp the window to the end of the FAT */
			unsigned long rest = sbi->fat_length - cur_block;

			fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
		}
		cur_block++;

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}