/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * move_extents.c
 *
 * Copyright (C) 2011 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/mount.h>
#include <linux/swap.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "ocfs2_ioctl.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "uptodate.h"
#include "super.h"
#include "dir.h"
#include "buffer_head_io.h"
#include "sysfile.h"
#include "refcounttree.h"

#include "move_extents.h"

struct ocfs2_move_extents_context {
	struct inode *inode;
	struct file *file;
	int auto_defrag;
	int partial;
	int credits;
	u32 new_phys_cpos;
	u32 clusters_moved;
	u64 refcount_loc;
	struct ocfs2_move_extents *range;
	struct ocfs2_extent_tree et;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
};
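
/*
 * 'et', 'meta_ac', 'data_ac' and 'dealloc' carry the extent-tree and
 * allocator state across one move/defrag pass; 'clusters_moved' and
 * 'new_phys_cpos' are what eventually gets reported back to userspace
 * via the ocfs2_move_extents range (me_moved_len/me_new_offset).
 */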

static int __ocfs2_move_extent(handle_t *handle,
			       struct ocfs2_move_extents_context *context,
			       u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
			       int ext_flags)
{
	int ret = 0, index;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_rec *rec, replace_rec;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el;
	u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
	u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);

	ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
					       p_cpos, new_p_cpos, len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	memset(&replace_rec, 0, sizeof(replace_rec));
	replace_rec.e_cpos = cpu_to_le32(cpos);
	replace_rec.e_leaf_clusters = cpu_to_le16(len);
	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
								   new_p_cpos));

	path = ocfs2_new_path_from_et(&context->et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el = path_leaf_el(path);

	index = ocfs2_search_extent_list(el, cpos);
	if (index == -1) {
		ret = ocfs2_error(inode->i_sb,
				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
				  (unsigned long long)ino, cpos);
		goto out;
	}

	rec = &el->l_recs[index];

	BUG_ON(ext_flags != rec->e_flags);

	/*
	 * After moving/defragging to the new location, the extent is no
	 * longer going to be refcounted.
	 */
	replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
				      context->et.et_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_split_extent(handle, &context->et, path, index,
				 &replace_rec, context->meta_ac,
				 &context->dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_journal_dirty(handle, context->et.et_root_bh);

	context->new_phys_cpos = new_p_cpos;

	/*
	 * Do we need to append the truncate log for the old clusters?
	 */
	if (old_blkno) {
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			ret = ocfs2_decrease_refcount(inode, handle,
					ocfs2_blocks_to_clusters(osb->sb,
								 old_blkno),
					len, context->meta_ac,
					&context->dealloc, 1);
		else
			ret = ocfs2_truncate_log_append(osb, handle,
							old_blkno, len);
	}

	ocfs2_update_inode_fsync_trans(handle, inode, 0);
out:
	ocfs2_free_path(path);
	return ret;
}
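
/*
 * In short, __ocfs2_move_extent() performs three journaled steps: copy
 * the data pages into the new clusters, split/replace the extent record
 * so it points at new_p_cpos (dropping OCFS2_EXT_REFCOUNTED), and then
 * release the old clusters, either through the refcount tree or the
 * truncate log.
 */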

/*
 * Lock the allocators, reserving an appropriate number of bits for
 * metadata blocks and data clusters.
 *
 * In some cases we don't need to reserve clusters; the caller then just
 * passes a NULL data_ac.
 */
static int ocfs2_lock_allocators_move_extents(struct inode *inode,
					struct ocfs2_extent_tree *et,
					u32 clusters_to_move,
					u32 extents_to_split,
					struct ocfs2_alloc_context **meta_ac,
					struct ocfs2_alloc_context **data_ac,
					int extra_blocks,
					int *credits)
{
	int ret, num_free_extents;
	unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	num_free_extents = ocfs2_num_free_extents(osb, et);
	if (num_free_extents < 0) {
		ret = num_free_extents;
		mlog_errno(ret);
		goto out;
	}

	if (!num_free_extents ||
	    (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
		extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);

	ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (data_ac) {
		ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);

	mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
	     extra_blocks, clusters_to_move, *credits);

out:
	if (ret) {
		if (*meta_ac) {
			ocfs2_free_alloc_context(*meta_ac);
			*meta_ac = NULL;
		}
	}

	return ret;
}
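
/*
 * A worst-case sizing example: moving 16 clusters while splitting one
 * extent gives max_recs_needed = 2 * 1 + 16 = 18, since every moved
 * cluster could in theory end up as its own extent record; if the tree
 * has fewer free records than that (on a sparse-alloc volume), extra
 * metadata blocks are reserved up front.
 */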

/*
 * Use one journal handle to guarantee data consistency in case a crash
 * happens anywhere.
 *
 * XXX: defrag can end up finishing only part of an extent as requested,
 * when not enough contiguous clusters can be found in the allocator.
 */
static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
			       u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
{
	int ret, credits = 0, extra_blocks = 0, partial = context->partial;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	u32 new_phys_cpos, new_len;
	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
		BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
			 OCFS2_HAS_REFCOUNT_FL));
		BUG_ON(!context->refcount_loc);

		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
					       &ref_tree, NULL);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}

		ret = ocfs2_prepare_refcount_change_for_del(inode,
							context->refcount_loc,
							phys_blkno,
							*len,
							&credits,
							&extra_blocks);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
						 &context->meta_ac,
						 &context->data_ac,
						 extra_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Should we be using the allocation reservation strategy here?
	 *
	 * if (context->data_ac)
	 *	context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
	 */

	inode_lock(tl_inode);

	if (ocfs2_truncate_log_needs_flush(osb)) {
		ret = __ocfs2_flush_truncate_log(osb);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock_mutex;
		}
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock_mutex;
	}

	ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
				     &new_phys_cpos, &new_len);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Allowing partial extent moves has pros and cons: it makes the
	 * whole defragmentation less likely to fail, but on the other hand
	 * it may leave the fs even more fragmented after the move. Let
	 * userspace make the decision here.
	 */
	if (new_len != *len) {
		mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
		if (!partial) {
			context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
			ret = -ENOSPC;
			goto out_commit;
		}
	}

	mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
	     phys_cpos, new_phys_cpos);

	ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
				  new_phys_cpos, ext_flags);
	if (ret)
		mlog_errno(ret);

	if (partial && (new_len != *len))
		*len = new_len;

	/*
	 * Here we should write the new page out first if we are
	 * in write-back mode.
	 */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_unlock_mutex:
	inode_unlock(tl_inode);

	if (context->data_ac) {
		ocfs2_free_alloc_context(context->data_ac);
		context->data_ac = NULL;
	}

	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

out:
	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}
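
/*
 * Note the ordering in ocfs2_defrag_extent() above: the truncate-log
 * inode mutex is taken (and the log flushed if needed) before the
 * transaction starts, and the claimed length may legitimately come back
 * shorter than requested; only with OCFS2_MOVE_EXT_FL_PART_DEFRAG set
 * does the caller accept such a partial claim instead of -ENOSPC.
 */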

/*
 * Find the victim alloc group, i.e. the group where 'vict_blkno' fits.
 */
static int ocfs2_find_victim_alloc_group(struct inode *inode,
					 u64 vict_blkno,
					 int type, int slot,
					 int *vict_bit,
					 struct buffer_head **ret_bh)
{
	int ret, i, bits_per_unit = 0;
	u64 blkno;
	char namebuf[40];

	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *rec;
	struct ocfs2_dinode *ac_dinode;
	struct ocfs2_group_desc *bg;

	ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
	ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
					 strlen(namebuf), &blkno);
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
	cl = &(ac_dinode->id2.i_chain);
	rec = &(cl->cl_recs[0]);

	if (type == GLOBAL_BITMAP_SYSTEM_INODE)
		bits_per_unit = osb->s_clustersize_bits -
					inode->i_sb->s_blocksize_bits;
	/*
	 * 'vict_blkno' is out of the valid range.
	 */
	if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
	    (vict_blkno >= ((u64)le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
				bits_per_unit))) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {

		rec = &(cl->cl_recs[i]);
		if (!rec)
			continue;

		bg = NULL;

		do {
			if (!bg)
				blkno = le64_to_cpu(rec->c_blkno);
			else
				blkno = le64_to_cpu(bg->bg_next_group);

			if (gd_bh) {
				brelse(gd_bh);
				gd_bh = NULL;
			}

			ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			bg = (struct ocfs2_group_desc *)gd_bh->b_data;

			if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
						le16_to_cpu(bg->bg_bits))) {

				*ret_bh = gd_bh;
				*vict_bit = (vict_blkno - blkno) >>
							bits_per_unit;
				mlog(0, "find the victim group: #%llu, "
				     "total_bits: %u, vict_bit: %u\n",
				     blkno, le16_to_cpu(bg->bg_bits),
				     *vict_bit);
				goto out;
			}

		} while (le64_to_cpu(bg->bg_next_group));
	}

	ret = -EINVAL;
out:
	brelse(ac_bh);

	/*
	 * caller has to release the gd_bh properly.
	 */
	return ret;
}
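
/*
 * For the global bitmap each bit covers one cluster, so bits_per_unit
 * converts a block offset into a bit index: e.g. with 4K clusters and
 * 512-byte blocks, bits_per_unit = 12 - 9 = 3, i.e. one bit per 8
 * blocks. For other system inode types bits_per_unit stays 0, so a bit
 * covers a single block.
 */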

/*
 * XXX: helper to validate and adjust the moving goal.
 */
static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
					       struct ocfs2_move_extents *range)
{
	int ret, goal_bit = 0;

	struct buffer_head *gd_bh = NULL;
	struct ocfs2_group_desc *bg;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int c_to_b = 1 << (osb->s_clustersize_bits -
					inode->i_sb->s_blocksize_bits);

	/*
	 * make the goal become cluster aligned.
	 */
	range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
						      range->me_goal);
	/*
	 * validate that the goal sits within the global_bitmap, and return
	 * the victim group desc
	 */
	ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
					    GLOBAL_BITMAP_SYSTEM_INODE,
					    OCFS2_INVALID_SLOT,
					    &goal_bit, &gd_bh);
	if (ret)
		goto out;

	bg = (struct ocfs2_group_desc *)gd_bh->b_data;

	/*
	 * The moving goal is not allowed to start with a group descriptor
	 * block (block #0 of the group); compromise by moving it to the
	 * next cluster.
	 */
	if (range->me_goal == le64_to_cpu(bg->bg_blkno))
		range->me_goal += c_to_b;

	/*
	 * The movement must not cross two groups.
	 */
	if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
								range->me_len) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * more exact validations/adjustments will be performed later during
	 * the moving operation for each extent range.
	 */
	mlog(0, "extents get ready to be moved to #%llu block\n",
	     range->me_goal);

out:
	brelse(gd_bh);

	return ret;
}
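
/*
 * c_to_b above is just the cluster-to-block ratio: with 1M clusters and
 * 4K blocks it is 1 << (20 - 12) = 256, so bumping me_goal past the
 * group descriptor block advances it by exactly one cluster's worth of
 * blocks.
 */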
static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
				    int *goal_bit, u32 move_len, u32 max_hop,
				    u32 *phys_cpos)
{
	int i, used, last_free_bits = 0, base_bit = *goal_bit;
	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
	u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
						 le64_to_cpu(gd->bg_blkno));

	for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {

		used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
		if (used) {
			/*
			 * We even tried searching for a free chunk by
			 * jumping as far as 'max_hop' away, but still
			 * failed.
			 */
			if ((i - base_bit) > max_hop) {
				*phys_cpos = 0;
				break;
			}

			if (last_free_bits)
				last_free_bits = 0;

			continue;
		} else
			last_free_bits++;

		if (last_free_bits == move_len) {
			*goal_bit = i;
			*phys_cpos = base_cpos + i;
			break;
		}
	}

	mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos);
}
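
/*
 * In other words, ocfs2_probe_alloc_group() scans the group bitmap from
 * *goal_bit for a run of 'move_len' consecutive free bits, and gives up
 * (reporting *phys_cpos == 0) once the scan has drifted more than
 * 'max_hop' bits past the original goal.
 */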

static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
			     u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
			     u32 len, int ext_flags)
{
	int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct inode *gb_inode = NULL;
	struct buffer_head *gb_bh = NULL;
	struct buffer_head *gd_bh = NULL;
	struct ocfs2_group_desc *gd;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
						    context->range->me_threshold);
	u64 phys_blkno, new_phys_blkno;

	phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
		BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
			 OCFS2_HAS_REFCOUNT_FL));
		BUG_ON(!context->refcount_loc);

		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
					       &ref_tree, NULL);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}

		ret = ocfs2_prepare_refcount_change_for_del(inode,
							context->refcount_loc,
							phys_blkno,
							len,
							&credits,
							&extra_blocks);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
						 &context->meta_ac,
						 NULL, extra_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * need to count 2 extra credits for the global_bitmap inode and
	 * group descriptor.
	 */
	credits += OCFS2_INODE_UPDATE_CREDITS + 1;

	/*
	 * ocfs2_move_extent() didn't reserve any clusters in
	 * ocfs2_lock_allocators_move_extents(), but we still need to lock
	 * the global_bitmap.
	 */
	gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
					       OCFS2_INVALID_SLOT);
	if (!gb_inode) {
		mlog(ML_ERROR, "unable to get global_bitmap inode\n");
		ret = -EIO;
		goto out;
	}

	inode_lock(gb_inode);

	ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_gb_mutex;
	}

	inode_lock(tl_inode);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock_tl_inode;
	}

	new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
	ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
					    GLOBAL_BITMAP_SYSTEM_INODE,
					    OCFS2_INVALID_SLOT,
					    &goal_bit, &gd_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Probe the victim cluster group to find a proper region to fit
	 * the wanted movement; it will even perform a best-effort attempt
	 * by compromising to a threshold around the goal.
	 */
	ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
				new_phys_cpos);
	if (!*new_phys_cpos) {
		ret = -ENOSPC;
		goto out_commit;
	}

	ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
				  *new_phys_cpos, ext_flags);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	gd = (struct ocfs2_group_desc *)gd_bh->b_data;
	ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
					       le16_to_cpu(gd->bg_chain));
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
					 goal_bit, len);
	if (ret) {
		ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len,
					       le16_to_cpu(gd->bg_chain));
		mlog_errno(ret);
	}

	/*
	 * Here we should write the new page out first if we are
	 * in write-back mode.
	 */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);
	brelse(gd_bh);

out_unlock_tl_inode:
	inode_unlock(tl_inode);

	ocfs2_inode_unlock(gb_inode, 1);
out_unlock_gb_mutex:
	inode_unlock(gb_inode);
	brelse(gb_bh);
	iput(gb_inode);

out:
	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}
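
/*
 * Unlike the defrag path, ocfs2_move_extent() allocates straight out of
 * the global bitmap at the validated goal, so on top of the extent-tree
 * credits it reserves OCFS2_INODE_UPDATE_CREDITS + 1 for the
 * global_bitmap inode and the group descriptor, and takes gb_inode's
 * i_mutex and cluster lock before the truncate-log inode mutex.
 */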

/*
 * Helper to calculate the defragging length in one run according to the
 * threshold.
 */
static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
					 u32 threshold, int *skip)
{
	if ((*alloc_size + *len_defraged) < threshold) {
		/*
		 * proceed defragmentation until we meet the threshold
		 */
		*len_defraged += *alloc_size;
	} else if (*len_defraged == 0) {
		/*
		 * XXX: skip a large extent.
		 */
		*skip = 1;
	} else {
		/*
		 * split this extent to coalesce with former pieces as
		 * to reach the threshold.
		 *
		 * we're done here with one cycle of defragmentation
		 * in a size of 'thresh', resetting 'len_defraged'
		 * forces a new defragmentation.
		 */
		*alloc_size = threshold - *len_defraged;
		*len_defraged = 0;
	}
}
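
/*
 * Example with defrag_thresh = 256 clusters and extents of 100, 100 and
 * 100 clusters: the first two calls accumulate len_defraged to 200; the
 * third clips *alloc_size to 256 - 200 = 56 and resets len_defraged, so
 * one defrag cycle coalesces exactly one threshold's worth of clusters.
 * A single 300-cluster extent seen with len_defraged == 0 is skipped
 * outright as already large enough.
 */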

static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
				      struct ocfs2_move_extents_context *context)
{
	int ret = 0, flags, do_defrag, skip = 0;
	u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
	u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;

	struct inode *inode = context->inode;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_move_extents *range = context->range;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if ((i_size_read(inode) == 0) || (range->me_len == 0))
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	context->refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&context->dealloc);

	/*
	 * TO-DO XXX:
	 *
	 * - xattr extents.
	 */

	do_defrag = context->auto_defrag;

	/*
	 * Extent moving happens in units of clusters; for the sake of
	 * simplicity, we may ignore the two partial clusters that
	 * 'byte_start' and 'byte_start + len' fall within.
	 */
	move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
	len_to_move = (range->me_start + range->me_len) >>
						osb->s_clustersize_bits;
	if (len_to_move >= move_start)
		len_to_move -= move_start;
	else
		len_to_move = 0;

	if (do_defrag) {
		defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
		if (defrag_thresh <= 1)
			goto done;
	} else
		new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
							 range->me_goal);

	mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
	     "thresh: %u\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     (unsigned long long)range->me_start,
	     (unsigned long long)range->me_len,
	     move_start, len_to_move, defrag_thresh);

	cpos = move_start;
	while (len_to_move) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
					 &flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (alloc_size > len_to_move)
			alloc_size = len_to_move;

		/*
		 * XXX: how to deal with a hole:
		 *
		 * - skip the hole of course
		 * - force a new defragmentation
		 */
		if (!phys_cpos) {
			if (do_defrag)
				len_defraged = 0;

			goto next;
		}

		if (do_defrag) {
			ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
						     defrag_thresh, &skip);
			/*
			 * skip large extents
			 */
			if (skip) {
				skip = 0;
				goto next;
			}

			mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
			     "alloc_size: %u, len_defraged: %u\n",
			     cpos, phys_cpos, alloc_size, len_defraged);

			ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
						  &alloc_size, flags);
		} else {
			ret = ocfs2_move_extent(context, cpos, phys_cpos,
						&new_phys_cpos, alloc_size,
						flags);

			new_phys_cpos += alloc_size;
		}

		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		context->clusters_moved += alloc_size;
next:
		cpos += alloc_size;
		len_to_move -= alloc_size;
	}

done:
	range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;

out:
	range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
						      context->clusters_moved);
	range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
						       context->new_phys_cpos);

	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &context->dealloc);

	return ret;
}
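
/*
 * The range is walked in cluster units: e.g. with 4K clusters,
 * me_start = 6K and me_len = 20K give move_start = 2 (rounded up) and
 * len_to_move = (26K >> 12) - 2 = 4, so the partial clusters at both
 * ends of the byte range are deliberately left alone.
 */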

static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
{
	int status;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_dinode *di;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	inode_lock(inode);

	/*
	 * This prevents concurrent writes from other nodes
	 */
	status = ocfs2_rw_lock(inode, 1);
	if (status) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_inode_lock(inode, &di_bh, 1);
	if (status) {
		mlog_errno(status);
		goto out_rw_unlock;
	}

	/*
	 * remember that ip_xattr_sem also needs to be held if necessary
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	status = __ocfs2_move_extents_range(di_bh, context);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (status) {
		mlog_errno(status);
		goto out_inode_unlock;
	}

	/*
	 * We update ctime for these changes
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_inode_unlock;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;
	inode->i_ctime = current_time(inode);
	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);
out:
	inode_unlock(inode);

	return status;
}
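
/*
 * The locking hierarchy in ocfs2_move_extents() is: inode_lock ->
 * ocfs2_rw_lock (fencing writers on other nodes) -> ocfs2_inode_lock ->
 * ip_alloc_sem for the actual extent walk; the ctime update afterwards
 * runs in its own small transaction.
 */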

int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
{
	int status;

	struct inode *inode = file_inode(filp);
	struct ocfs2_move_extents range;
	struct ocfs2_move_extents_context *context;

	if (!argp)
		return -EINVAL;

	status = mnt_want_write_file(filp);
	if (status)
		return status;

	if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) {
		status = -EPERM;
		goto out_drop;
	}

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		status = -EPERM;
		goto out_drop;
	}

	context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
	if (!context) {
		status = -ENOMEM;
		mlog_errno(status);
		goto out_drop;
	}

	context->inode = inode;
	context->file = filp;

	if (copy_from_user(&range, argp, sizeof(range))) {
		status = -EFAULT;
		goto out_free;
	}

	if (range.me_start > i_size_read(inode)) {
		status = -EINVAL;
		goto out_free;
	}

	if (range.me_start + range.me_len > i_size_read(inode))
		range.me_len = i_size_read(inode) - range.me_start;

	context->range = &range;

	if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
		context->auto_defrag = 1;
		/*
		 * The default threshold for defragmentation is 1M, since
		 * our maximum cluster size is 1M as well. Any thoughts?
		 */
		if (!range.me_threshold)
			range.me_threshold = 1024 * 1024;

		if (range.me_threshold > i_size_read(inode))
			range.me_threshold = i_size_read(inode);

		if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
			context->partial = 1;
	} else {
		/*
		 * This is a first, best-effort attempt to validate and
		 * adjust the goal (a physical address in blocks). It can't
		 * guarantee that the later operation will succeed every
		 * time, since the global_bitmap may change a bit over time.
		 */
		status = ocfs2_validate_and_adjust_move_goal(inode, &range);
		if (status)
			goto out_copy;
	}

	status = ocfs2_move_extents(context);
	if (status)
		mlog_errno(status);
out_copy:
	/*
	 * The movement/defragmentation may end up being partially completed;
	 * that's the reason why we need to return the finished length and
	 * new_offset to userspace even if a failure happens somewhere.
	 */
	if (copy_to_user(argp, &range, sizeof(range)))
		status = -EFAULT;

out_free:
	kfree(context);
out_drop:
	mnt_drop_write_file(filp);

	return status;
}
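
/*
 * A minimal userspace sketch of driving this ioctl (assuming the
 * OCFS2_IOC_MOVE_EXT request code and the struct layout from
 * ocfs2_ioctl.h):
 *
 *	struct ocfs2_move_extents range = {
 *		.me_start     = 0,
 *		.me_len       = 16 * 1024 * 1024,
 *		.me_flags     = OCFS2_MOVE_EXT_FL_AUTO_DEFRAG |
 *				OCFS2_MOVE_EXT_FL_PART_DEFRAG,
 *		.me_threshold = 1024 * 1024,
 *	};
 *	int fd = open("/mnt/ocfs2/file", O_RDWR);
 *
 *	if (ioctl(fd, OCFS2_IOC_MOVE_EXT, &range) < 0)
 *		perror("OCFS2_IOC_MOVE_EXT");
 *	else if (!(range.me_flags & OCFS2_MOVE_EXT_FL_COMPLETE))
 *		fprintf(stderr, "partial: %llu bytes moved\n",
 *			(unsigned long long)range.me_moved_len);
 */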