/* fs/fat/fatent.c */
  1. /*
  2. * Copyright (C) 2004, OGAWA Hirofumi
  3. * Released under GPL v2.
  4. */
  5. #include <linux/module.h>
  6. #include <linux/fs.h>
  7. #include <linux/msdos_fs.h>
  8. #include <linux/blkdev.h>
  9. #include "fat.h"
/*
 * Method table abstracting per-FAT-variant (12/16/32) entry access.
 * One instance per variant exists below; the right one is selected at
 * mount time by fat_ent_access_init().
 */
struct fatent_operations {
	/* map cluster number -> (byte offset within block, block number) */
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	/* aim fatent->u at the entry at @offset inside the cached buffer(s) */
	void (*ent_set_ptr)(struct fat_entry *, int);
	/* read the block(s) holding the entry and set the entry pointer */
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	/* decode the current entry value (FAT_ENT_EOF for end-of-chain) */
	int (*ent_get)(struct fat_entry *);
	/* encode and store a new value, marking the buffer(s) dirty */
	void (*ent_put)(struct fat_entry *, int);
	/* advance to next entry; 0 once the cached block is exhausted */
	int (*ent_next)(struct fat_entry *);
};
  19. static DEFINE_SPINLOCK(fat12_entry_lock);
  20. static void fat12_ent_blocknr(struct super_block *sb, int entry,
  21. int *offset, sector_t *blocknr)
  22. {
  23. struct msdos_sb_info *sbi = MSDOS_SB(sb);
  24. int bytes = entry + (entry >> 1);
  25. WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
  26. *offset = bytes & (sb->s_blocksize - 1);
  27. *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
  28. }
  29. static void fat_ent_blocknr(struct super_block *sb, int entry,
  30. int *offset, sector_t *blocknr)
  31. {
  32. struct msdos_sb_info *sbi = MSDOS_SB(sb);
  33. int bytes = (entry << sbi->fatent_shift);
  34. WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
  35. *offset = bytes & (sb->s_blocksize - 1);
  36. *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
  37. }
/*
 * Point fatent->u.ent12_p[0/1] at the two bytes spanned by a FAT12
 * entry.  With one cached buffer both bytes live in bhs[0]; with two,
 * the entry straddles a block boundary and the second byte is the very
 * first byte of bhs[1].
 */
static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	struct buffer_head **bhs = fatent->bhs;
	if (fatent->nr_bhs == 1) {
		/* both bytes must fit inside bhs[0] */
		WARN_ON(offset >= (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
	} else {
		/* two buffers are only used for the last byte of bhs[0] */
		WARN_ON(offset != (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[1]->b_data;
	}
}
  51. static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
  52. {
  53. WARN_ON(offset & (2 - 1));
  54. fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
  55. }
  56. static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
  57. {
  58. WARN_ON(offset & (4 - 1));
  59. fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
  60. }
/*
 * Read the FAT block(s) containing the 12bit entry at (@offset,
 * @blocknr) and set up the entry pointers.  A FAT12 entry may straddle
 * a block boundary, in which case the following block is read as well
 * and nr_bhs becomes 2.  Returns 0 on success, -EIO on read failure.
 */
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;
	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;
	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry is block boundary, it needs the next block */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
	return -EIO;
}
  88. static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
  89. int offset, sector_t blocknr)
  90. {
  91. struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
  92. WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
  93. fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
  94. fatent->bhs[0] = sb_bread(sb, blocknr);
  95. if (!fatent->bhs[0]) {
  96. fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
  97. (llu)blocknr);
  98. return -EIO;
  99. }
  100. fatent->nr_bhs = 1;
  101. ops->ent_set_ptr(fatent, offset);
  102. return 0;
  103. }
/*
 * Decode a 12bit FAT entry.  Odd-numbered entries occupy the high
 * nibble of byte 0 plus all of byte 1; even-numbered entries occupy
 * byte 0 plus the low nibble of byte 1.  fat12_entry_lock serialises
 * against fat12_ent_put() writers sharing a byte with a neighbouring
 * entry.  Returns the entry value, or FAT_ENT_EOF for end-of-chain.
 */
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;
	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);
	/* only 12 bits are significant */
	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}
  119. static int fat16_ent_get(struct fat_entry *fatent)
  120. {
  121. int next = le16_to_cpu(*fatent->u.ent16_p);
  122. WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
  123. if (next >= BAD_FAT16)
  124. next = FAT_ENT_EOF;
  125. return next;
  126. }
  127. static int fat32_ent_get(struct fat_entry *fatent)
  128. {
  129. int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
  130. WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
  131. if (next >= BAD_FAT32)
  132. next = FAT_ENT_EOF;
  133. return next;
  134. }
/*
 * Store @new into a 12bit FAT entry, translating FAT_ENT_EOF to the
 * on-disk EOF_FAT12 marker.  The read-modify-write of the byte shared
 * with the neighbouring entry is protected by fat12_entry_lock; both
 * backing buffers are dirtied when the entry straddles two blocks.
 */
static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;
	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;
	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		/* odd entry: high nibble of byte 0 + all of byte 1 */
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		/* even entry: all of byte 0 + low nibble of byte 1 */
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);
	mark_buffer_dirty_inode_sync(fatent->bhs[0], fatent->fat_inode);
	if (fatent->nr_bhs == 2){
		mark_buffer_dirty_inode_sync(fatent->bhs[1], fatent->fat_inode);
	}
}
  154. static void fat16_ent_put(struct fat_entry *fatent, int new)
  155. {
  156. if (new == FAT_ENT_EOF)
  157. new = EOF_FAT16;
  158. *fatent->u.ent16_p = cpu_to_le16(new);
  159. mark_buffer_dirty_inode_sync(fatent->bhs[0], fatent->fat_inode);
  160. }
  161. static void fat32_ent_put(struct fat_entry *fatent, int new)
  162. {
  163. if (new == FAT_ENT_EOF)
  164. new = EOF_FAT32;
  165. WARN_ON(new & 0xf0000000);
  166. new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
  167. *fatent->u.ent32_p = cpu_to_le32(new);
  168. mark_buffer_dirty_inode_sync(fatent->bhs[0], fatent->fat_inode);
  169. }
/*
 * Advance a FAT12 entry pointer to the next (1.5-byte) entry.  Entries
 * alternate between starting on a byte boundary (even entry numbers)
 * and mid-byte (odd), hence the extra +1 step after odd entries.  When
 * the current entry straddled two buffers, bhs[1] becomes the sole
 * cached buffer.  Returns 1 while the next entry is still inside the
 * cached buffer(s); 0 when a fresh block must be read (entry pointers
 * are then cleared).
 */
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		/* the next entry no longer touches bhs[0]; keep only bhs[1] */
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	/* block exhausted; caller must bread the next block */
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}
  198. static int fat16_ent_next(struct fat_entry *fatent)
  199. {
  200. const struct buffer_head *bh = fatent->bhs[0];
  201. fatent->entry++;
  202. if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
  203. fatent->u.ent16_p++;
  204. return 1;
  205. }
  206. fatent->u.ent16_p = NULL;
  207. return 0;
  208. }
  209. static int fat32_ent_next(struct fat_entry *fatent)
  210. {
  211. const struct buffer_head *bh = fatent->bhs[0];
  212. fatent->entry++;
  213. if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
  214. fatent->u.ent32_p++;
  215. return 1;
  216. }
  217. fatent->u.ent32_p = NULL;
  218. return 0;
  219. }
/* FAT12 methods: 12bit entries may straddle blocks, so all helpers differ. */
static struct fatent_operations fat12_ops = {
	.ent_blocknr = fat12_ent_blocknr,
	.ent_set_ptr = fat12_ent_set_ptr,
	.ent_bread = fat12_ent_bread,
	.ent_get = fat12_ent_get,
	.ent_put = fat12_ent_put,
	.ent_next = fat12_ent_next,
};
/* FAT16 methods: 2-byte entries, shared blocknr/bread with FAT32. */
static struct fatent_operations fat16_ops = {
	.ent_blocknr = fat_ent_blocknr,
	.ent_set_ptr = fat16_ent_set_ptr,
	.ent_bread = fat_ent_bread,
	.ent_get = fat16_ent_get,
	.ent_put = fat16_ent_put,
	.ent_next = fat16_ent_next,
};
/* FAT32 methods: 4-byte entries with 4 reserved top bits. */
static struct fatent_operations fat32_ops = {
	.ent_blocknr = fat_ent_blocknr,
	.ent_set_ptr = fat32_ent_set_ptr,
	.ent_bread = fat_ent_bread,
	.ent_get = fat32_ent_get,
	.ent_put = fat32_ent_put,
	.ent_next = fat32_ent_next,
};
/* Serialise FAT table access/allocation on this volume. */
static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}
static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}
/*
 * Initialise the FAT entry access machinery at mount time: set up
 * fat_lock and select the operations table plus entry-size shift for
 * this volume's FAT variant.  fatent_shift is log2(bytes per entry)
 * for FAT16/32; -1 flags FAT12, whose 1.5-byte entries are handled by
 * fat12_ent_blocknr() without using the shift.
 */
void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	mutex_init(&sbi->fat_lock);
	switch (sbi->fat_bits) {
	case 32:
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
		break;
	case 16:
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
		break;
	case 12:
		/* sentinel: the shift is never used for FAT12 */
		sbi->fatent_shift = -1;
		sbi->fatent_ops = &fat12_ops;
		break;
	}
}
  271. static void mark_fsinfo_dirty(struct super_block *sb)
  272. {
  273. struct msdos_sb_info *sbi = MSDOS_SB(sb);
  274. if (sb->s_flags & MS_RDONLY || sbi->fat_bits != 32)
  275. return;
  276. __mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
  277. }
/*
 * Try to reuse the buffer(s) already cached in @fatent for the entry
 * at (@offset, @blocknr).  Returns 1 and re-aims the entry pointer if
 * the cached blocks cover the entry; 0 if the caller must read fresh
 * blocks.  For FAT12 a no-longer-needed second buffer is released.
 */
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Is this fatent's blocks including this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (sbi->fat_bits == 12) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				/* the second block is no longer needed */
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}
/*
 * Read the FAT entry for cluster @entry into @fatent and return its
 * decoded value (next cluster number, FAT_ENT_FREE or FAT_ENT_EOF).
 * Buffers already cached in @fatent are reused when they still cover
 * the entry; otherwise they are released and fresh blocks are read.
 * Returns -EIO for an out-of-range cluster number or a read failure.
 */
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;

	if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
		fatent_brelse(fatent);
		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		return -EIO;
	}

	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);
	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}
/* FIXME: We can write the blocks as more big chunk. */
/*
 * Copy the dirtied FAT blocks @bhs[0..nr_bhs) to every backup FAT on
 * the volume; each copy is offset by fat_length blocks from the
 * primary.  On MS_SYNCHRONOUS mounts every mirrored block is written
 * out synchronously.  Returns 0 or a negative errno.
 */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			mark_buffer_dirty_inode_sync(c_bh, sbi->fat_inode);
			if (sb->s_flags & MS_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}
  357. int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
  358. int new, int wait)
  359. {
  360. struct super_block *sb = inode->i_sb;
  361. struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
  362. int err;
  363. ops->ent_put(fatent, new);
  364. if (wait) {
  365. err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
  366. if (err)
  367. return err;
  368. }
  369. return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
  370. }
  371. static inline int fat_ent_next(struct msdos_sb_info *sbi,
  372. struct fat_entry *fatent)
  373. {
  374. if (sbi->fatent_ops->ent_next(fatent)) {
  375. if (fatent->entry < sbi->max_cluster)
  376. return 1;
  377. }
  378. return 0;
  379. }
  380. static inline int fat_ent_read_block(struct super_block *sb,
  381. struct fat_entry *fatent)
  382. {
  383. struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
  384. sector_t blocknr;
  385. int offset;
  386. fatent_brelse(fatent);
  387. ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
  388. return ops->ent_bread(sb, fatent, offset, blocknr);
  389. }
  390. static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
  391. struct fat_entry *fatent)
  392. {
  393. int n, i;
  394. for (n = 0; n < fatent->nr_bhs; n++) {
  395. for (i = 0; i < *nr_bhs; i++) {
  396. if (fatent->bhs[n] == bhs[i])
  397. break;
  398. }
  399. if (i == *nr_bhs) {
  400. get_bh(fatent->bhs[n]);
  401. bhs[i] = fatent->bhs[n];
  402. (*nr_bhs)++;
  403. }
  404. }
  405. }
/*
 * Allocate @nr_cluster free clusters, chaining them together and
 * terminating the chain with FAT_ENT_EOF.  The allocated cluster
 * numbers are returned in @cluster[].  The scan starts just after the
 * previous allocation (sbi->prev_free) and wraps around the whole FAT.
 * On failure any partially built chain is freed again.
 * Returns 0 on success, -ENOSPC when the volume is full, or another
 * negative errno on I/O error.
 */
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2)); /* fixed limit */

	lock_fat(sbi);
	/* bail out early if the free count is known and already too small */
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		/* wrap the scan around to the first data cluster */
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() gets ref-count of bhs,
				 * so we can still use the prev_ent.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* undo the partial chain if anything failed after allocation began */
	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}
/*
 * Walk the cluster chain starting at @cluster, marking every entry
 * FAT_ENT_FREE, optionally issuing discards for the released sectors
 * (batched over contiguous cluster runs), and mirroring the updated
 * FAT blocks to the backup FAT(s).  Returns 0 or a negative errno.
 */
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			/* a free entry inside a chain means FAT corruption */
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);
				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		/* flush collected buffers before the array would overflow */
		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & MS_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & MS_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);
  554. /* 128kb is the whole sectors for FAT12 and FAT16 */
  555. #define FAT_READA_SIZE (128 * 1024)
  556. static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
  557. unsigned long reada_blocks)
  558. {
  559. struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
  560. sector_t blocknr;
  561. int i, offset;
  562. ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
  563. for (i = 0; i < reada_blocks; i++)
  564. sb_breadahead(sb, blocknr + i);
  565. }
/*
 * Scan the whole FAT and (re)compute sbi->free_clusters, issuing
 * readahead in FAT_READA_SIZE-sized chunks to keep the scan
 * sequential.  The scan is skipped when a valid count is cached.
 * Returns 0 or a negative errno from reading the FAT.
 */
int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	unsigned long reada_blocks, reada_mask, cur_block;
	int err = 0, free;

	lock_fat(sbi);
	/* the cached count is still valid; nothing to do */
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;

	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
	reada_mask = reada_blocks - 1;
	cur_block = 0;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		if ((cur_block & reada_mask) == 0) {
			unsigned long rest = sbi->fat_length - cur_block;
			fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
		}
		cur_block++;

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}