/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Koji Sato.
 * Revised by Ryusuke Konishi.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>
/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;	/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

/* Block offset within the sufile that holds the entry for @segnum */
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

/* Entry index of @segnum within its sufile block */
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}
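
/*
 * Editor's note: a worked example of the two helpers above, assuming
 * 4096-byte blocks, 16-byte segment usage entries (256 per block) and
 * mi_first_entry_offset == 2 (the header occupying two entry slots):
 * segment 300 then lives at blkoff = (300 + 2) / 256 = 1, at in-block
 * entry offset (300 + 2) % 256 = 46.  The concrete values depend on the
 * on-disk format parameters and are illustrative only.
 */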

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store the number of modified segments
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			nilfs_msg(sufile->i_sb, KERN_WARNING,
				  "%s: invalid segment number: %llu",
				  __func__, (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);

 out:
	if (ndone)
		*ndone = n;
	return ret;
}
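
/*
 * Editor's note: a minimal sketch of how a caller can drive
 * nilfs_sufile_updatev() with one of the do-functions defined below; the
 * companion header wraps this pattern in helpers such as
 * nilfs_sufile_freev().  Illustrative only:
 *
 *	size_t ndone;
 *	int err = nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, &ndone,
 *				       nilfs_sufile_do_free);
 */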

int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: invalid segment number: %llu",
			  __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
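
/*
 * Editor's note: an illustrative call restricting allocation to the first
 * half of the segment array; the chosen bounds are hypothetical:
 *
 *	__u64 nsegs = nilfs_sufile_get_nsegments(sufile);
 *	int err = nilfs_sufile_set_alloc_range(sufile, 0, nsegs / 2 - 1);
 */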

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed to by @segnump.  On
 * error, one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
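
/*
 * Editor's note: typical use of nilfs_sufile_alloc(), sketched from the
 * signature above; use_segment() is a hypothetical caller-side function
 * and the error handling is caller-specific.  Illustrative only:
 *
 *	__u64 segnum;
 *	int err = nilfs_sufile_alloc(sufile, &segnum);
 *	if (!err)
 *		use_segment(segnum);	-- segnum is marked dirty on success
 */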

void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: segment %llu must be clean", __func__,
			  (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: segment %llu is already clean",
			  __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer containing a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (optional; ignored if zero)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed to by @sustat.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
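
/*
 * Editor's note: reading the counters via nilfs_sufile_get_stat(), an
 * illustrative sketch only:
 *
 *	struct nilfs_sustat sustat;
 *	if (!nilfs_sufile_get_stat(sufile, &sustat))
 *		pr_info("segments: %llu clean / %llu total\n",
 *			(unsigned long long)sustat.ss_ncleansegs,
 *			(unsigned long long)sustat.ss_nsegs);
 */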

void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
out:
	return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

out_header:
	brelse(header_bh);
out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies the segment usage
 * information of up to @nsi segments, starting at @segnum, into @buf.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned.  On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned int sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					BIT(NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
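
/*
 * Editor's note: an illustrative consumer of nilfs_sufile_get_suinfo(),
 * walking a caller-supplied array; count_clean_segment() is a
 * hypothetical helper and the buffer sizing is the caller's job:
 *
 *	struct nilfs_suinfo si[16];
 *	ssize_t i, n = nilfs_sufile_get_suinfo(sufile, 0, si, sizeof(si[0]),
 *					       ARRAY_SIZE(si));
 *	for (i = 0; i < n; i++)
 *		if (nilfs_suinfo_clean(&si[i]))
 *			count_clean_segment(i);
 */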

/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly.  Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned int supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */
			sup->sup_sui.sui_flags &=
					~BIT(NILFS_SEGMENT_USAGE_ACTIVE);

			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

 out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
					 (u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
 out_header:
	put_bh(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
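
/*
 * Editor's note: a sketch of a single-entry update via
 * nilfs_sufile_set_suinfo(), assuming the NILFS_SUINFO_UPDATE_* bit
 * indices from the userspace API header; illustrative only:
 *
 *	struct nilfs_suinfo_update sup = {
 *		.sup_segnum = segnum,
 *		.sup_flags = BIT(NILFS_SUINFO_UPDATE_NBLOCKS),
 *	};
 *	sup.sup_sui.sui_nblocks = nblocks;
 *	ret = nilfs_sufile_set_suinfo(sufile, &sup, sizeof(sup), 1);
 */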

/**
 * nilfs_sufile_trim_fs() - trim ioctl handler
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start:  first byte to trim
 * len:    number of bytes to trim from start
 * minlen: minimum extent length in bytes
 *
 * Description: nilfs_sufile_trim_fs() goes through all segments containing
 * bytes from start to start + len.  start is rounded up to the next block
 * boundary and start + len is rounded down.  For each clean segment,
 * blkdev_issue_discard() is invoked.
 *
 * Return Value: 0 on success, or a negative error code on failure.
 */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	sector_t start = 0, nblocks = 0;
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
			bdev_logical_block_size(nilfs->ns_bdev);
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	start_block = (range->start + nilfs->ns_blocksize - 1) >>
			nilfs->ns_blocksize_bits;

	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	while (segnum <= segnum_end) {
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
				segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* hole */
			segnum += n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
				su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
				/* start new extent */
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
				/* add to previous extent */
				nblocks += seg_end - seg_start + 1;
				continue;
			}

			/* discard previous extent */
			if (start < start_block) {
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				kunmap_atomic(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS, 0);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_atomic(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}

			/* start new extent */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_atomic(kaddr);
		put_bh(su_bh);
	}

	if (nblocks) {
		/* discard last extent */
		if (start < start_block) {
			nblocks -= start_block - start;
			start = start_block;
		}
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS, 0);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}
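
/*
 * Editor's note: nilfs_sufile_trim_fs() is normally fed an fstrim_range
 * copied in from the FITRIM ioctl; a minimal sketch of such a caller:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,	-- trim the whole device
 *		.minlen = 0,
 *	};
 *	ret = nilfs_sufile_trim_fs(sufile, &range);
 *	-- on return, range.len holds the number of bytes discarded
 */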

/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	if (susize > sb->s_blocksize) {
		nilfs_msg(sb, KERN_ERR,
			  "too large segment usage size: %zu bytes", susize);
		return -EINVAL;
	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
		nilfs_msg(sb, KERN_ERR,
			  "too small segment usage size: %zu bytes", susize);
		return -EINVAL;
	}

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}