/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
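
/*
 * Background GC thread: sleeps for an adaptive interval, then runs one
 * round of background GC whenever the filesystem is idle and gc_mutex
 * is free. The sleep time shrinks while there are plenty of invalid
 * blocks to reclaim and grows otherwise.
 */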
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false);
#endif

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait some time to let
		 * more dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}
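
/* allocate and start the background GC thread for this superblock */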
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}
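
/* stop the background GC thread and free its control structure */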
void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
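
/*
 * Choose the victim selection mode: cost-benefit for background GC and
 * greedy for foreground GC, unless gc_idle overrides the default.
 */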
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}
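
/*
 * Fill in the victim selection policy: SSR scans the per-type dirty
 * segment map greedily in segment units, while LFS scans all dirty
 * segments in section units using the mode from select_gc_type().
 */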
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}
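
/* initial (maximum) cost: a candidate must cost less than this to win */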
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC before. Those sections are guaranteed
	 * to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}
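
/*
 * Cost-benefit cost of a section: with utilization u (percent of valid
 * blocks) and age normalized to [0, 100] from the section's mean mtime,
 * the benefit is age * (100 - u) and the cost is (100 + u). The result
 * is inverted (UINT_MAX - benefit/cost) so that a smaller return value
 * means a better victim.
 */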
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
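
/* return the cost of the given segment under the current policy */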
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	last_victim = sbi->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
				sbi->last_victim[p.gc_mode] = last_victim + 1;
			else
				sbi->last_victim[p.gc_mode] = segno + 1;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
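
/*
 * gc_inode_list helpers: hold one reference per inode touched during
 * the scan phases so that the final move phase can reuse them;
 * put_gc_inode() releases everything.
 */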
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}
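
/* test whether the given block is still marked valid in the SIT bitmap */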
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is still live and is
 * moved with cold status; otherwise (a stale node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

/*
 * Calculate the start block index of the data area addressed by the
 * given node offset. Be careful: the caller must pass node offsets of
 * direct node blocks only. Passing an offset that points to any other
 * type of node block, such as an indirect or double indirect node
 * block, is a caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
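
/*
 * Check whether a summarized data block is still alive: the owning node
 * must exist with a matching version, and the block address recorded in
 * the node must equal the block being collected. The node offset is
 * reported through *nofs for use with start_bidx_of_node().
 */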
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}
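
/*
 * Move an encrypted data block without decrypting it: read the raw
 * ciphertext through a meta-inode page, allocate a new block address,
 * write the page there, and update the owning dnode. On failure, the
 * newly allocated block is rolled back via __f2fs_replace_block().
 */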
static void move_encrypted_block(struct inode *inode, block_t bidx,
					unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
							&sum, CURSEG_COLD_DATA);

	fio.encrypted_page = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
							newaddr, true);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.rw = WRITE_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_mbio(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}
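
/*
 * Move one plaintext data page: background GC just marks it dirty and
 * cold so that writeback relocates it, while foreground GC writes it
 * out synchronously, retrying on transient -ENOMEM.
 */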
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
					unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.rw = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated. If the parent node is not
 * valid or the data block address differs, the victim block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* get an inode by ino and check its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, defer the move to phase 4 */
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, READA, true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx, segno, off);
			else
				move_data_page(inode, start_bidx, gc_type, segno, off);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}
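
/* pick a victim section for GC with the SIT sentry lock held */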
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
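
/*
 * Collect one victim section: reference all of its summary blocks, then
 * move the node or data blocks of every segment in the section. Returns
 * 1 if a foreground GC freed the whole section, 0 otherwise.
 */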
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int sec_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks with contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, 1) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_bio(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC &&
		get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
		sec_freed = 1;

	stat_inc_call_count(sbi->stat_info);

	return sec_freed;
}
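
/*
 * Entry point of garbage collection, called with sbi->gc_mutex held;
 * the mutex is released before returning. In sync mode, GC runs in the
 * foreground and returns 0 only if a section was actually freed.
 */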
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
{
	unsigned int segno;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	segno = NULL_SEGNO;

	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
		gc_type = FG_GC;
		/*
		 * If there is no victim and no prefree segment but still not
		 * enough free sections, we should flush dent/node blocks and do
		 * garbage collections.
		 */
		if (__get_victim(sbi, &segno, gc_type) ||
						prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			segno = NULL_SEGNO;
		} else if (has_not_enough_free_secs(sbi, 0, 0)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
	} else if (gc_type == BG_GC && !background) {
		/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
		goto stop;
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
			gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0))
			goto gc_more;

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);
	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}
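
/* install the default victim selection policy (called at mount time) */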
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}