/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * move_extents.c
 *
 * Copyright (C) 2011 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/mount.h>
#include <linux/swap.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "ocfs2_ioctl.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "uptodate.h"
#include "super.h"
#include "dir.h"
#include "buffer_head_io.h"
#include "sysfile.h"
#include "refcounttree.h"
#include "move_extents.h"

struct ocfs2_move_extents_context {
        struct inode *inode;
        struct file *file;
        int auto_defrag;
        int partial;
        int credits;
        u32 new_phys_cpos;
        u32 clusters_moved;
        u64 refcount_loc;
        struct ocfs2_move_extents *range;
        struct ocfs2_extent_tree et;
        struct ocfs2_alloc_context *meta_ac;
        struct ocfs2_alloc_context *data_ac;
        struct ocfs2_cached_dealloc_ctxt dealloc;
};

static int __ocfs2_move_extent(handle_t *handle,
                               struct ocfs2_move_extents_context *context,
                               u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
                               int ext_flags)
{
        int ret = 0, index;
        struct inode *inode = context->inode;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_extent_rec *rec, replace_rec;
        struct ocfs2_path *path = NULL;
        struct ocfs2_extent_list *el;
        u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
        u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);

        ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
                                               p_cpos, new_p_cpos, len);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        memset(&replace_rec, 0, sizeof(replace_rec));
        replace_rec.e_cpos = cpu_to_le32(cpos);
        replace_rec.e_leaf_clusters = cpu_to_le16(len);
        replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
                                                                   new_p_cpos));

        path = ocfs2_new_path_from_et(&context->et);
        if (!path) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        el = path_leaf_el(path);

        index = ocfs2_search_extent_list(el, cpos);
        if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
                ocfs2_error(inode->i_sb,
                            "Inode %llu has an extent at cpos %u which can no "
                            "longer be found.\n",
                            (unsigned long long)ino, cpos);
                ret = -EROFS;
                goto out;
        }

        rec = &el->l_recs[index];

        BUG_ON(ext_flags != rec->e_flags);
        /*
         * after moving/defragging to its new location, the extent is no
         * longer going to be refcounted.
         */
        replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
                                      context->et.et_root_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_split_extent(handle, &context->et, path, index,
                                 &replace_rec, context->meta_ac,
                                 &context->dealloc);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ocfs2_journal_dirty(handle, context->et.et_root_bh);

        context->new_phys_cpos = new_p_cpos;

        /*
         * do we need to append the truncate log for the old clusters?
         */
        if (old_blkno) {
                if (ext_flags & OCFS2_EXT_REFCOUNTED)
                        ret = ocfs2_decrease_refcount(inode, handle,
                                        ocfs2_blocks_to_clusters(osb->sb,
                                                                 old_blkno),
                                        len, context->meta_ac,
                                        &context->dealloc, 1);
                else
                        ret = ocfs2_truncate_log_append(osb, handle,
                                                        old_blkno, len);
        }

out:
        ocfs2_free_path(path);
        return ret;
}

/*
 * Lock the allocators, reserving an appropriate number of bits for
 * metadata blocks and data clusters.
 *
 * In some cases we don't need to reserve clusters; the caller just
 * passes a NULL data_ac.
 */
static int ocfs2_lock_allocators_move_extents(struct inode *inode,
                                        struct ocfs2_extent_tree *et,
                                        u32 clusters_to_move,
                                        u32 extents_to_split,
                                        struct ocfs2_alloc_context **meta_ac,
                                        struct ocfs2_alloc_context **data_ac,
                                        int extra_blocks,
                                        int *credits)
{
        int ret, num_free_extents;
        unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        num_free_extents = ocfs2_num_free_extents(osb, et);
        if (num_free_extents < 0) {
                ret = num_free_extents;
                mlog_errno(ret);
                goto out;
        }

        if (!num_free_extents ||
            (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
                extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);

        ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        if (data_ac) {
                ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el,
                                              clusters_to_move + 2);

        mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
             extra_blocks, clusters_to_move, *credits);

out:
        if (ret) {
                if (*meta_ac) {
                        ocfs2_free_alloc_context(*meta_ac);
                        *meta_ac = NULL;
                }
        }

        return ret;
}

/*
 * Using one journal handle to guarantee data consistency in case a
 * crash happens anywhere.
 *
 * XXX: defrag can end up finishing only part of an extent as requested,
 * when not enough contiguous clusters can be found in the allocator.
 */
static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
                               u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
{
        int ret, credits = 0, extra_blocks = 0, partial = context->partial;
        handle_t *handle;
        struct inode *inode = context->inode;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct inode *tl_inode = osb->osb_tl_inode;
        struct ocfs2_refcount_tree *ref_tree = NULL;
        u32 new_phys_cpos, new_len;
        u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

        if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {

                BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
                         OCFS2_HAS_REFCOUNT_FL));

                BUG_ON(!context->refcount_loc);

                ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
                                               &ref_tree, NULL);
                if (ret) {
                        mlog_errno(ret);
                        return ret;
                }

                ret = ocfs2_prepare_refcount_change_for_del(inode,
                                                        context->refcount_loc,
                                                        phys_blkno,
                                                        *len,
                                                        &credits,
                                                        &extra_blocks);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
                                                 &context->meta_ac,
                                                 &context->data_ac,
                                                 extra_blocks, &credits);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        /*
         * should we be using the allocation reservation strategy here?
         *
         * if (context->data_ac)
         *      context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
         */

        mutex_lock(&tl_inode->i_mutex);

        if (ocfs2_truncate_log_needs_flush(osb)) {
                ret = __ocfs2_flush_truncate_log(osb);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out_unlock_mutex;
                }
        }

        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out_unlock_mutex;
        }

        ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
                                     &new_phys_cpos, &new_len);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        /*
         * allowing partial extent moving has its pros and cons: it makes
         * the whole defragmentation less likely to fail; on the other
         * hand, it may leave the fs even more fragmented after moving.
         * Let userspace make the call here.
         */
        if (new_len != *len) {
                mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
                if (!partial) {
                        context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
                        ret = -ENOSPC;
                        goto out_commit;
                }
        }

        mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
             phys_cpos, new_phys_cpos);

        ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
                                  new_phys_cpos, ext_flags);
        if (ret)
                mlog_errno(ret);

        if (partial && (new_len != *len))
                *len = new_len;

        /*
         * Here we should write the new page out first if we are
         * in write-back mode.
         */
        ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
        if (ret)
                mlog_errno(ret);

out_commit:
        ocfs2_commit_trans(osb, handle);

out_unlock_mutex:
        mutex_unlock(&tl_inode->i_mutex);

        if (context->data_ac) {
                ocfs2_free_alloc_context(context->data_ac);
                context->data_ac = NULL;
        }

        if (context->meta_ac) {
                ocfs2_free_alloc_context(context->meta_ac);
                context->meta_ac = NULL;
        }

out:
        if (ref_tree)
                ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

        return ret;
}

/*
 * find the victim alloc group, where #blkno fits.
 */
static int ocfs2_find_victim_alloc_group(struct inode *inode,
                                         u64 vict_blkno,
                                         int type, int slot,
                                         int *vict_bit,
                                         struct buffer_head **ret_bh)
{
        int ret, i, bits_per_unit = 0;
        u64 blkno;
        char namebuf[40];

        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
        struct ocfs2_chain_list *cl;
        struct ocfs2_chain_rec *rec;
        struct ocfs2_dinode *ac_dinode;
        struct ocfs2_group_desc *bg;

        ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
        ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
                                         strlen(namebuf), &blkno);
        if (ret) {
                ret = -ENOENT;
                goto out;
        }

        ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
        cl = &(ac_dinode->id2.i_chain);
        rec = &(cl->cl_recs[0]);

        if (type == GLOBAL_BITMAP_SYSTEM_INODE)
                bits_per_unit = osb->s_clustersize_bits -
                                        inode->i_sb->s_blocksize_bits;
        /*
         * 'vict_blkno' is out of the valid range.
         */
        if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
            (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
                                bits_per_unit))) {
                ret = -EINVAL;
                goto out;
        }

        for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {

                rec = &(cl->cl_recs[i]);
                if (!rec)
                        continue;

                bg = NULL;

                do {
                        if (!bg)
                                blkno = le64_to_cpu(rec->c_blkno);
                        else
                                blkno = le64_to_cpu(bg->bg_next_group);

                        if (gd_bh) {
                                brelse(gd_bh);
                                gd_bh = NULL;
                        }

                        ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
                        if (ret) {
                                mlog_errno(ret);
                                goto out;
                        }

                        bg = (struct ocfs2_group_desc *)gd_bh->b_data;

                        if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
                                                le16_to_cpu(bg->bg_bits))) {

                                *ret_bh = gd_bh;
                                *vict_bit = (vict_blkno - blkno) >>
                                                        bits_per_unit;
                                mlog(0, "find the victim group: #%llu, "
                                     "total_bits: %u, vict_bit: %u\n",
                                     blkno, le16_to_cpu(bg->bg_bits),
                                     *vict_bit);
                                goto out;
                        }

                } while (le64_to_cpu(bg->bg_next_group));
        }

        ret = -EINVAL;
out:
        brelse(ac_bh);

        /*
         * caller has to release the gd_bh properly.
         */
        return ret;
}
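
/*
 * A worked example of the victim-bit math above (illustrative numbers,
 * not taken from the source): with 4 KB blocks (s_blocksize_bits = 12)
 * and 32 KB clusters (s_clustersize_bits = 15), the global bitmap holds
 * one bit per cluster, so bits_per_unit = 15 - 12 = 3. A victim block
 * sitting 20 blocks past its group's start block then maps to
 * vict_bit = 20 >> 3 = 2, i.e. the third cluster-sized unit of that
 * group. For the other (block-unit) bitmaps, bits_per_unit stays 0 and
 * the block offset is used as the bit index directly.
 */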

/*
 * XXX: helper to validate and adjust moving goal.
 */
static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
                                        struct ocfs2_move_extents *range)
{
        int ret, goal_bit = 0;

        struct buffer_head *gd_bh = NULL;
        struct ocfs2_group_desc *bg = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        int c_to_b = 1 << (osb->s_clustersize_bits -
                           inode->i_sb->s_blocksize_bits);

        /*
         * make the goal become cluster aligned.
         */
        range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
                                                      range->me_goal);
        /*
         * validate that the goal sits within the global_bitmap, and return
         * the victim group desc
         */
        ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
                                            GLOBAL_BITMAP_SYSTEM_INODE,
                                            OCFS2_INVALID_SLOT,
                                            &goal_bit, &gd_bh);
        if (ret)
                goto out;

        bg = (struct ocfs2_group_desc *)gd_bh->b_data;

        /*
         * the moving goal is not allowed to start with a group desc block
         * (#0 blk); compromise to the next cluster.
         */
        if (range->me_goal == le64_to_cpu(bg->bg_blkno))
                range->me_goal += c_to_b;

        /*
         * movement is not going to cross two groups.
         */
        if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
                                                                range->me_len) {
                ret = -EINVAL;
                goto out;
        }
        /*
         * more exact validations/adjustments will be performed later
         * during the moving operation for each extent range.
         */
        mlog(0, "extents get ready to be moved to #%llu block\n",
             range->me_goal);

out:
        brelse(gd_bh);

        return ret;
}

static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
                                    int *goal_bit, u32 move_len, u32 max_hop,
                                    u32 *phys_cpos)
{
        int i, used, last_free_bits = 0, base_bit = *goal_bit;

        struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
        u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
                                                 le64_to_cpu(gd->bg_blkno));

        for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {

                used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
                if (used) {
                        /*
                         * we even tried searching the free chunk by jumping
                         * a 'max_hop' distance, but still failed.
                         */
                        if ((i - base_bit) > max_hop) {
                                *phys_cpos = 0;
                                break;
                        }

                        if (last_free_bits)
                                last_free_bits = 0;

                        continue;
                } else
                        last_free_bits++;

                if (last_free_bits == move_len) {
                        *goal_bit = i;
                        *phys_cpos = base_cpos + i;
                        break;
                }
        }

        mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos);
}

static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
                                            handle_t *handle,
                                            struct buffer_head *di_bh,
                                            u32 num_bits,
                                            u16 chain)
{
        int ret;
        u32 tmp_used;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
        struct ocfs2_chain_list *cl =
                                (struct ocfs2_chain_list *) &di->id2.i_chain;

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
        di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
        le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);

        ocfs2_journal_dirty(handle, di_bh);

out:
        return ret;
}

static inline int ocfs2_block_group_set_bits(handle_t *handle,
                                             struct inode *alloc_inode,
                                             struct ocfs2_group_desc *bg,
                                             struct buffer_head *group_bh,
                                             unsigned int bit_off,
                                             unsigned int num_bits)
{
        int status;
        void *bitmap = bg->bg_bitmap;
        int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;

        /* All callers get the descriptor via
         * ocfs2_read_group_descriptor().  Any corruption is a code bug. */
        BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
        BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);

        mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
             num_bits);

        if (ocfs2_is_cluster_bitmap(alloc_inode))
                journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

        status = ocfs2_journal_access_gd(handle,
                                         INODE_CACHE(alloc_inode),
                                         group_bh,
                                         journal_type);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
        if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
                ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
                            " count %u but claims %u are freed. num_bits %d",
                            (unsigned long long)le64_to_cpu(bg->bg_blkno),
                            le16_to_cpu(bg->bg_bits),
                            le16_to_cpu(bg->bg_free_bits_count), num_bits);
                return -EROFS;
        }

        while (num_bits--)
                ocfs2_set_bit(bit_off++, bitmap);

        ocfs2_journal_dirty(handle, group_bh);

bail:
        return status;
}

static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
                             u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
                             u32 len, int ext_flags)
{
        int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
        handle_t *handle;
        struct inode *inode = context->inode;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct inode *tl_inode = osb->osb_tl_inode;
        struct inode *gb_inode = NULL;
        struct buffer_head *gb_bh = NULL;
        struct buffer_head *gd_bh = NULL;
        struct ocfs2_group_desc *gd;
        struct ocfs2_refcount_tree *ref_tree = NULL;
        u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
                                                context->range->me_threshold);
        u64 phys_blkno, new_phys_blkno;

        phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

        if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {

                BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
                         OCFS2_HAS_REFCOUNT_FL));

                BUG_ON(!context->refcount_loc);

                ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
                                               &ref_tree, NULL);
                if (ret) {
                        mlog_errno(ret);
                        return ret;
                }

                ret = ocfs2_prepare_refcount_change_for_del(inode,
                                                        context->refcount_loc,
                                                        phys_blkno,
                                                        len,
                                                        &credits,
                                                        &extra_blocks);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
                                                 &context->meta_ac,
                                                 NULL, extra_blocks, &credits);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        /*
         * need to count 2 extra credits for global_bitmap inode and
         * group descriptor.
         */
        credits += OCFS2_INODE_UPDATE_CREDITS + 1;

        /*
         * ocfs2_move_extent() didn't reserve any clusters in lock_allocators()
         * logic, while we still need to lock the global_bitmap.
         */
        gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
                                               OCFS2_INVALID_SLOT);
        if (!gb_inode) {
                mlog(ML_ERROR, "unable to get global_bitmap inode\n");
                ret = -EIO;
                goto out;
        }

        mutex_lock(&gb_inode->i_mutex);

        ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
        if (ret) {
                mlog_errno(ret);
                goto out_unlock_gb_mutex;
        }

        mutex_lock(&tl_inode->i_mutex);

        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out_unlock_tl_inode;
        }

        new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
        ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
                                            GLOBAL_BITMAP_SYSTEM_INODE,
                                            OCFS2_INVALID_SLOT,
                                            &goal_bit, &gd_bh);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        /*
         * probe the victim cluster group to find a proper region to fit
         * the wanted movement; it will even perform a best-effort attempt
         * by compromising to a threshold around the goal.
         */
        ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
                                new_phys_cpos);
        if (!*new_phys_cpos) {
                ret = -ENOSPC;
                goto out_commit;
        }

        ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
                                  *new_phys_cpos, ext_flags);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        gd = (struct ocfs2_group_desc *)gd_bh->b_data;
        ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
                                               le16_to_cpu(gd->bg_chain));
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
                                         goal_bit, len);
        if (ret)
                mlog_errno(ret);

        /*
         * Here we should write the new page out first if we are
         * in write-back mode.
         */
        ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
        if (ret)
                mlog_errno(ret);

out_commit:
        ocfs2_commit_trans(osb, handle);
        brelse(gd_bh);

out_unlock_tl_inode:
        mutex_unlock(&tl_inode->i_mutex);

        ocfs2_inode_unlock(gb_inode, 1);
out_unlock_gb_mutex:
        mutex_unlock(&gb_inode->i_mutex);
        brelse(gb_bh);
        iput(gb_inode);

out:
        if (context->meta_ac) {
                ocfs2_free_alloc_context(context->meta_ac);
                context->meta_ac = NULL;
        }

        if (ref_tree)
                ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

        return ret;
}

/*
 * Helper to calculate the defragging length in one run according to the
 * threshold.
 */
static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
                                         u32 threshold, int *skip)
{
        if ((*alloc_size + *len_defraged) < threshold) {
                /*
                 * proceed with defragmentation until we meet the threshold
                 */
                *len_defraged += *alloc_size;
        } else if (*len_defraged == 0) {
                /*
                 * XXX: skip a large extent.
                 */
                *skip = 1;
        } else {
                /*
                 * split this extent to coalesce with former pieces so
                 * as to reach the threshold.
                 *
                 * we're done here with one cycle of defragmentation
                 * in a size of 'threshold'; resetting 'len_defraged'
                 * forces a new defragmentation.
                 */
                *alloc_size = threshold - *len_defraged;
                *len_defraged = 0;
        }
}
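
/*
 * A worked example of the helper above (illustrative numbers, not taken
 * from the source): with threshold = 8 clusters, feeding it extents of
 * 3, 4 and 5 clusters in turn gives:
 *
 *   3: 0 + 3 < 8  -> accumulate, len_defraged = 3
 *   4: 3 + 4 < 8  -> accumulate, len_defraged = 7
 *   5: 7 + 5 >= 8 -> split, alloc_size = 8 - 7 = 1, len_defraged = 0
 *
 * so only one cluster of the third extent joins this defrag cycle, and
 * the following extent starts a fresh cycle. A single 9-cluster extent
 * seen while len_defraged == 0 would simply be skipped as already large.
 */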

static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
                                struct ocfs2_move_extents_context *context)
{
        int ret = 0, flags, do_defrag, skip = 0;
        u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
        u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;

        struct inode *inode = context->inode;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        struct ocfs2_move_extents *range = context->range;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if ((inode->i_size == 0) || (range->me_len == 0))
                return 0;

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                return 0;

        context->refcount_loc = le64_to_cpu(di->i_refcount_loc);

        ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
        ocfs2_init_dealloc_ctxt(&context->dealloc);

        /*
         * TO-DO XXX:
         *
         * - xattr extents.
         */

        do_defrag = context->auto_defrag;

        /*
         * extent moving happens in units of clusters; for the sake of
         * simplicity, we may ignore the two clusters that 'byte_start'
         * and 'byte_start + len' fall within.
         */
        move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
        len_to_move = (range->me_start + range->me_len) >>
                                                osb->s_clustersize_bits;
        if (len_to_move >= move_start)
                len_to_move -= move_start;
        else
                len_to_move = 0;

        if (do_defrag) {
                defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
                if (defrag_thresh <= 1)
                        goto done;
        } else
                new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
                                                         range->me_goal);

        mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
             "thresh: %u\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             (unsigned long long)range->me_start,
             (unsigned long long)range->me_len,
             move_start, len_to_move, defrag_thresh);

        cpos = move_start;
        while (len_to_move) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
                                         &flags);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                if (alloc_size > len_to_move)
                        alloc_size = len_to_move;

                /*
                 * XXX: how to deal with a hole:
                 *
                 * - skip the hole of course
                 * - force a new defragmentation
                 */
                if (!phys_cpos) {
                        if (do_defrag)
                                len_defraged = 0;

                        goto next;
                }

                if (do_defrag) {
                        ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
                                                     defrag_thresh, &skip);
                        /*
                         * skip large extents
                         */
                        if (skip) {
                                skip = 0;
                                goto next;
                        }

                        mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
                             "alloc_size: %u, len_defraged: %u\n",
                             cpos, phys_cpos, alloc_size, len_defraged);

                        ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
                                                  &alloc_size, flags);
                } else {
                        ret = ocfs2_move_extent(context, cpos, phys_cpos,
                                                &new_phys_cpos, alloc_size,
                                                flags);

                        new_phys_cpos += alloc_size;
                }

                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                context->clusters_moved += alloc_size;
next:
                cpos += alloc_size;
                len_to_move -= alloc_size;
        }

done:
        range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;

out:
        range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
                                                      context->clusters_moved);
        range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
                                                       context->new_phys_cpos);

        ocfs2_schedule_truncate_log_flush(osb, 1);
        ocfs2_run_deallocs(osb, &context->dealloc);

        return ret;
}

static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
{
        int status;
        handle_t *handle;
        struct inode *inode = context->inode;
        struct ocfs2_dinode *di;
        struct buffer_head *di_bh = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (!inode)
                return -ENOENT;

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return -EROFS;

        mutex_lock(&inode->i_mutex);

        /*
         * This prevents concurrent writes from other nodes
         */
        status = ocfs2_rw_lock(inode, 1);
        if (status) {
                mlog_errno(status);
                goto out;
        }

        status = ocfs2_inode_lock(inode, &di_bh, 1);
        if (status) {
                mlog_errno(status);
                goto out_rw_unlock;
        }

        /*
         * remember that ip_xattr_sem also needs to be held if necessary
         */
        down_write(&OCFS2_I(inode)->ip_alloc_sem);

        status = __ocfs2_move_extents_range(di_bh, context);

        up_write(&OCFS2_I(inode)->ip_alloc_sem);
        if (status) {
                mlog_errno(status);
                goto out_inode_unlock;
        }

        /*
         * We update ctime for these changes
         */
        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_inode_unlock;
        }

        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status) {
                mlog_errno(status);
                goto out_commit;
        }

        di = (struct ocfs2_dinode *)di_bh->b_data;
        inode->i_ctime = CURRENT_TIME;
        di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

        ocfs2_journal_dirty(handle, di_bh);

out_commit:
        ocfs2_commit_trans(osb, handle);

out_inode_unlock:
        brelse(di_bh);
        ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
        ocfs2_rw_unlock(inode, 1);
out:
        mutex_unlock(&inode->i_mutex);

        return status;
}

int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
{
        int status;

        struct inode *inode = filp->f_path.dentry->d_inode;
        struct ocfs2_move_extents range;
        struct ocfs2_move_extents_context *context = NULL;

        status = mnt_want_write(filp->f_path.mnt);
        if (status)
                return status;

        if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE))
                goto out;

        if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
                status = -EPERM;
                goto out;
        }

        context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
        if (!context) {
                status = -ENOMEM;
                mlog_errno(status);
                goto out;
        }

        context->inode = inode;
        context->file = filp;

        if (argp) {
                if (copy_from_user(&range, (struct ocfs2_move_extents *)argp,
                                   sizeof(range))) {
                        status = -EFAULT;
                        goto out;
                }
        } else {
                status = -EINVAL;
                goto out;
        }

        if (range.me_start > i_size_read(inode))
                goto out;

        if (range.me_start + range.me_len > i_size_read(inode))
                range.me_len = i_size_read(inode) - range.me_start;

        context->range = &range;

        if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
                context->auto_defrag = 1;
                /*
                 * ok, the default threshold for the defragmentation
                 * is 1M, since our maximum clustersize is 1M as well.
                 * any thoughts?
                 */
                if (!range.me_threshold)
                        range.me_threshold = 1024 * 1024;

                if (range.me_threshold > i_size_read(inode))
                        range.me_threshold = i_size_read(inode);

                if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
                        context->partial = 1;
        } else {
                /*
                 * first best-effort attempt to validate and adjust the goal
                 * (physical address in block); it can't guarantee that the
                 * later operation will always succeed, since the
                 * global_bitmap may change a bit over time.
                 */
                status = ocfs2_validate_and_adjust_move_goal(inode, &range);
                if (status)
                        goto out;
        }

        status = ocfs2_move_extents(context);
        if (status)
                mlog_errno(status);
out:
        /*
         * movement/defragmentation may end up being partially completed;
         * that's the reason why we need to return the finished length and
         * new_offset to userspace even if a failure happens somewhere.
         */
        if (argp) {
                if (copy_to_user((struct ocfs2_move_extents *)argp, &range,
                                 sizeof(range)))
                        status = -EFAULT;
        }

        kfree(context);

        mnt_drop_write(filp->f_path.mnt);

        return status;
}
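
/*
 * A minimal userspace sketch of driving this ioctl, assuming the
 * OCFS2_IOC_MOVE_EXT request number and the struct ocfs2_move_extents
 * layout from ocfs2_ioctl.h are visible to the caller; the file name
 * and byte counts below are made up for illustration:
 *
 *      struct ocfs2_move_extents range;
 *      int fd = open("/mnt/ocfs2/some_file", O_RDWR);
 *
 *      memset(&range, 0, sizeof(range));
 *      range.me_start = 0;                      // byte offset in the file
 *      range.me_len = 16 * 1024 * 1024;         // bytes to defrag
 *      range.me_flags = OCFS2_MOVE_EXT_FL_AUTO_DEFRAG;
 *      // me_threshold == 0 picks the 1M default set above
 *
 *      if (ioctl(fd, OCFS2_IOC_MOVE_EXT, &range) == 0 &&
 *          (range.me_flags & OCFS2_MOVE_EXT_FL_COMPLETE))
 *              // range.me_moved_len reports the defragged byte count
 *              ;
 *
 * Without OCFS2_MOVE_EXT_FL_AUTO_DEFRAG, me_goal carries the desired
 * physical goal (in blocks) and the extents are moved toward it rather
 * than defragged; OCFS2_MOVE_EXT_FL_PART_DEFRAG additionally tolerates
 * partial defragmentation, as handled in ocfs2_ioctl_move_extents()
 * above.
 */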