/*
 * linux/fs/hfs/extent.c
 *
 * Copyright (C) 1995-1997 Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains the functions related to the extents B-tree.
 */

#include <linux/pagemap.h>

#include "hfs_fs.h"
#include "btree.h"

/*================ File-local functions ================*/

/*
 * build_key
 */
static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
{
	key->key_len = 7;
	key->ext.FkType = type;
	key->ext.FNum = cpu_to_be32(cnid);
	key->ext.FABN = cpu_to_be16(block);
}

/*
 * hfs_ext_keycmp()
 *
 * Description:
 *   This is the comparison function used for the extents B-tree.  In
 *   comparing extent B-tree entries, the file id is the most
 *   significant field (compared as unsigned ints); the fork type is
 *   the second most significant field (compared as unsigned chars);
 *   and the allocation block number field is the least significant
 *   (compared as unsigned ints).
 * Input Variable(s):
 *   struct hfs_ext_key *key1: pointer to the first key to compare
 *   struct hfs_ext_key *key2: pointer to the second key to compare
 * Output Variable(s):
 *   NONE
 * Returns:
 *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
 * Preconditions:
 *   key1 and key2 point to "valid" (struct hfs_ext_key)s.
 * Postconditions:
 *   This function has no side-effects
 */
int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
{
	__be32 fnum1, fnum2;
	__be16 block1, block2;

	fnum1 = key1->ext.FNum;
	fnum2 = key2->ext.FNum;
	if (fnum1 != fnum2)
		return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1;
	if (key1->ext.FkType != key2->ext.FkType)
		return key1->ext.FkType < key2->ext.FkType ? -1 : 1;

	block1 = key1->ext.FABN;
	block2 = key2->ext.FABN;
	if (block1 == block2)
		return 0;
	return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1;
}

/*
 * hfs_ext_find_block
 *
 * Find a block within an extent record
 */
static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
{
	int i;
	u16 count;

	for (i = 0; i < 3; ext++, i++) {
		count = be16_to_cpu(ext->count);
		if (off < count)
			return be16_to_cpu(ext->block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

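/*
 * hfs_ext_block_count
 *
 * Total number of allocation blocks covered by a (three-slot) extent record.
 */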
static int hfs_ext_block_count(struct hfs_extent *ext)
{
	int i;
	u16 count = 0;

	for (i = 0; i < 3; ext++, i++)
		count += be16_to_cpu(ext->count);
	return count;
}

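/*
 * hfs_ext_lastblock
 *
 * Allocation block number just past the last block mapped by an extent
 * record (used as the search goal when extending the file).
 */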
static u16 hfs_ext_lastblock(struct hfs_extent *ext)
{
	int i;

	ext += 2;
	for (i = 0; i < 2; ext--, i++)
		if (ext->count)
			break;
	return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
}

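/*
 * __hfs_ext_write_extent
 *
 * Write the inode's cached extent record back to the extents B-tree:
 * insert a new record if HFS_FLG_EXT_NEW is set, otherwise overwrite
 * the existing one.  The caller supplies an fd set up on the extents tree.
 */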
static void __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
	int res;

	hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
			  HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
	res = hfs_brec_find(fd);
	if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
		if (res != -ENOENT)
			return;
		hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	} else {
		if (res)
			return;
		hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
		HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
	}
}

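/*
 * hfs_ext_write_extent
 *
 * Flush the inode's cached extent record to the extents B-tree if it is dirty.
 */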
void hfs_ext_write_extent(struct inode *inode)
{
	struct hfs_find_data fd;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
		hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
		__hfs_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
}

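/*
 * __hfs_ext_read_extent
 *
 * Look up the extent record of @cnid/@type that covers file allocation
 * block @block and copy it into @extent.  Returns -ENOENT if no record
 * for this file and fork exists, -EIO if the record has the wrong size.
 */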
static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
					u32 cnid, u32 block, u8 type)
{
	int res;

	hfs_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.FNum = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
	    fd->key->ext.FkType != fd->search_key->ext.FkType)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfs_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
	return 0;
}

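/*
 * __hfs_ext_cache_extent
 *
 * Make the extent record containing allocation block @block the inode's
 * cached record, writing back the previously cached record first if it
 * is dirty.
 */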
static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	int res;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY)
		__hfs_ext_write_extent(inode, fd);

	res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
				    block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
	if (!res) {
		HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
		HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
	} else {
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	}
	return res;
}

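/*
 * hfs_ext_read_extent
 *
 * Ensure the extent record covering allocation block @block is cached in
 * the inode, looking it up in the extents B-tree if necessary.
 */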
static int hfs_ext_read_extent(struct inode *inode, u16 block)
{
	struct hfs_find_data fd;
	int res;

	if (block >= HFS_I(inode)->cached_start &&
	    block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
		return 0;

	hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
	res = __hfs_ext_cache_extent(&fd, inode, block);
	hfs_find_exit(&fd);
	return res;
}

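/* Debug helper: print the block:count pairs of an extent record. */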
static void hfs_dump_extent(struct hfs_extent *extent)
{
	int i;

	dprint(DBG_EXTENT, " ");
	for (i = 0; i < 3; i++)
		dprint(DBG_EXTENT, " %u:%u", be16_to_cpu(extent[i].block),
		       be16_to_cpu(extent[i].count));
	dprint(DBG_EXTENT, "\n");
}

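/*
 * hfs_add_extent
 *
 * Append @block_count newly allocated blocks starting at allocation block
 * @alloc_block to an extent record, at record-relative block offset
 * @offset.  Grows the last extent in place when the new blocks are
 * contiguous with it, otherwise starts the next free slot; returns
 * -ENOSPC when the record has no free slot left.
 */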
static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
			  u16 alloc_block, u16 block_count)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count) {
			start = be16_to_cpu(extent->block);
			if (alloc_block != start + count) {
				if (++i >= 3)
					return -ENOSPC;
				extent++;
				extent->block = cpu_to_be16(alloc_block);
			} else
				block_count += count;
			extent->count = cpu_to_be16(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

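/*
 * hfs_free_extents
 *
 * Free @block_nr allocation blocks from an extent record: the blocks
 * freed are the ones ending at record-relative block offset @offset.
 * The freed blocks are cleared in the volume bitmap and the affected
 * extent slots are trimmed or emptied.
 */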
static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
			    u16 offset, u16 block_nr)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be16_to_cpu(extent->block);
		if (count <= block_nr) {
			hfs_clear_vbm_bits(sb, start, count);
			extent->block = 0;
			extent->count = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			hfs_clear_vbm_bits(sb, start + count, block_nr);
			extent->count = cpu_to_be16(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be16_to_cpu(extent->count);
	}
}

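/*
 * hfs_free_fork
 *
 * Free all allocation blocks of the data or resource fork of a catalog
 * file record: first the extents stored in the catalog record itself,
 * then any overflow extent records in the extents B-tree, which are
 * removed as their blocks are freed.
 */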
int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
{
	struct hfs_find_data fd;
	u32 total_blocks, blocks, start;
	u32 cnid = be32_to_cpu(file->FlNum);
	struct hfs_extent *extent;
	int res, i;

	if (type == HFS_FK_DATA) {
		total_blocks = be32_to_cpu(file->PyLen);
		extent = file->ExtRec;
	} else {
		total_blocks = be32_to_cpu(file->RPyLen);
		extent = file->RExtRec;
	}
	total_blocks /= HFS_SB(sb)->alloc_blksz;
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 3; i++)
		blocks += be16_to_cpu(extent[i].count);

	res = hfs_free_extents(sb, extent, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	do {
		res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
		if (res)
			break;
		start = be16_to_cpu(fd.key->ext.FABN);
		hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

/*
 * hfs_get_block
 */
int hfs_get_block(struct inode *inode, sector_t block,
		  struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	u16 dblock, ablock;
	int res;

	sb = inode->i_sb;

	/* Convert inode block to disk allocation block */
	ablock = (u32)block / HFS_SB(sb)->fs_div;

	if (block >= HFS_I(inode)->fs_blocks) {
		if (block > HFS_I(inode)->fs_blocks || !create)
			return -EIO;
		if (ablock >= HFS_I(inode)->alloc_blocks) {
			res = hfs_extend_file(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < HFS_I(inode)->first_blocks) {
		dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
		goto done;
	}

	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_ext_read_extent(inode, ablock);
	if (!res)
		dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
					    ablock - HFS_I(inode)->cached_start);
	else {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		return -EIO;
	}
	mutex_unlock(&HFS_I(inode)->extents_lock);

done:
	map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
	       dblock * HFS_SB(sb)->fs_div +
	       (u32)block % HFS_SB(sb)->fs_div);

	if (create) {
		set_buffer_new(bh_result);
		HFS_I(inode)->phys_size += sb->s_blocksize;
		HFS_I(inode)->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		mark_inode_dirty(inode);
	}
	return 0;
}

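/*
 * hfs_extend_file
 *
 * Grow the file by up to one clump of allocation blocks: search the
 * volume bitmap for free blocks near the file's current last block and
 * append them to the in-inode extents or to the cached overflow extent
 * record, starting a new overflow record when neither has room.
 */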
int hfs_extend_file(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	mutex_lock(&HFS_I(inode)->extents_lock);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
		goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
	else {
		res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
		if (res)
			goto out;
		goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
	}

	len = HFS_I(inode)->clump_blocks;
	start = hfs_vbm_search_free(sb, goal, &len);
	if (!len) {
		res = -ENOSPC;
		goto out;
	}

	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
		if (!HFS_I(inode)->first_blocks) {
			dprint(DBG_EXTENT, "first extents\n");
			/* no extents yet */
			HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
			HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfs_add_extent(HFS_I(inode)->first_extents,
					     HFS_I(inode)->alloc_blocks,
					     start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks += len;
		}
	} else {
		res = hfs_add_extent(HFS_I(inode)->cached_extents,
				     HFS_I(inode)->alloc_blocks -
				     HFS_I(inode)->cached_start,
				     start, len);
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->cached_extents);
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			HFS_I(inode)->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&HFS_I(inode)->extents_lock);
	if (!res) {
		HFS_I(inode)->alloc_blocks += len;
		mark_inode_dirty(inode);
		if (inode->i_ino < HFS_FIRSTUSER_CNID)
			set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		sb->s_dirt = 1;
	}
	return res;

insert_extent:
	dprint(DBG_EXTENT, "insert new extent\n");
	hfs_ext_write_extent(inode);

	memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
	HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
	hfs_dump_extent(HFS_I(inode)->cached_extents);
	HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;

	HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
	HFS_I(inode)->cached_blocks = len;

	res = 0;
	goto out;
}

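/*
 * hfs_file_truncate
 *
 * Bring the on-disk allocation in line with a changed i_size: extend the
 * file through the page cache when i_size grew, or walk the extent
 * records backwards, freeing allocation blocks and removing emptied
 * overflow records, when it shrank.
 */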
void hfs_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	u16 blk_cnt, alloc_cnt, start;
	u32 size;
	int res;

	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
	       (long long)HFS_I(inode)->phys_size, inode->i_size);
	if (inode->i_size > HFS_I(inode)->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		void *fsdata;
		struct page *page;
		int res;

		/* XXX: Can use generic_cont_expand? */
		size = inode->i_size - 1;
		res = pagecache_write_begin(NULL, mapping, size+1, 0,
					    AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
		if (!res) {
			res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
						  page, fsdata);
		}
		if (res)
			inode->i_size = HFS_I(inode)->phys_size;
		return;
	} else if (inode->i_size == HFS_I(inode)->phys_size)
		return;

	size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
	blk_cnt = size / HFS_SB(sb)->alloc_blksz;
	alloc_cnt = HFS_I(inode)->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	mutex_lock(&HFS_I(inode)->extents_lock);
	hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	while (1) {
		if (alloc_cnt == HFS_I(inode)->first_blocks) {
			hfs_free_extents(sb, HFS_I(inode)->first_extents,
					 alloc_cnt, alloc_cnt - blk_cnt);
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks = blk_cnt;
			break;
		}
		res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = HFS_I(inode)->cached_start;
		hfs_free_extents(sb, HFS_I(inode)->cached_extents,
				 alloc_cnt - start, alloc_cnt - blk_cnt);
		hfs_dump_extent(HFS_I(inode)->cached_extents);
		if (blk_cnt > start) {
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&HFS_I(inode)->extents_lock);

	HFS_I(inode)->alloc_blocks = blk_cnt;
out:
	HFS_I(inode)->phys_size = inode->i_size;
	HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	mark_inode_dirty(inode);
}