/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;	/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}
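
/* Number of segment usage entries that fit in one sufile block */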
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}
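
/*
 * A worked example of the mapping above, assuming (for illustration only;
 * the real values depend on the on-disk block and entry sizes) 4 KiB
 * blocks, 16-byte segment usage entries, and a first-entry offset of 2
 * because block 0 begins with the sufile header: entries_per_block is
 * 4096 / 16 = 256, so segment 1000 maps to block (1000 + 2) / 256 = 3,
 * at entry offset (1000 + 2) % 256 = 234 within that block.
 */
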
static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}
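
/*
 * The sufile header (struct nilfs_sufile_header) sits at the start of
 * block 0; mi_first_entry_offset accounts for the entry slots it occupies.
 */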
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}
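
/*
 * nilfs_sufile_mod_counter - adjust the clean/dirty segment counters in
 * the sufile header and mark the header buffer dirty.  The deltas are
 * applied with le64_add_cpu(), so callers pass (u64)-1 to decrement.
 */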
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr, KM_USER0);

	mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store the number of modified segments
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of segments successfully modified from the head of @segnumv is
 * stored in the place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);

 out:
	if (ndone)
		*ndone = n;
	return ret;
}
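
/*
 * nilfs_sufile_update - single-segment counterpart of nilfs_sufile_updatev().
 * Looks up the header block and the block holding the usage entry of
 * @segnum, then applies @dofunc to both under mi_sem.
 */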
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
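
/*
 * Note: narrowing the allocatable range is presumably how callers such as
 * the online-resize path keep nilfs_sufile_alloc() from handing out
 * segments inside a region that is about to be truncated.
 */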

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr, KM_USER0);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;
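
	/*
	 * Scan strategy of the loop below: start just after the last
	 * allocation and sweep towards allocmax; wrap around to allocmin
	 * if nothing was found; only then fall back to the segments
	 * outside the [allocmin, allocmax] range.
	 */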
	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr, KM_USER0);

			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr, KM_USER0);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
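
/*
 * nilfs_sufile_do_cancel_free - dofunc for nilfs_sufile_update() that
 * marks a clean segment dirty again, cancelling its freed state and
 * adjusting the counters accordingly.  Warns and bails out if the
 * segment is not clean.
 */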
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
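
/*
 * nilfs_sufile_do_scrap - dofunc that turns a segment into garbage: it
 * clears su_lastmod and su_nblocks and leaves only the DIRTY flag set,
 * updating the clean/dirty counters to match the previous state.  A
 * segment that is already plain dirty garbage is left untouched.
 */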
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
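
/*
 * nilfs_sufile_do_free - dofunc that marks a dirty segment clean and
 * bumps the clean-segment counters.  Warns if the segment was already
 * clean, carried the error flag, or was not marked dirty.
 */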
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr, KM_USER0);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (optional; ignored if zero)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr, KM_USER0);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
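
/*
 * nilfs_sufile_do_set_error - dofunc that flags a segment as erroneous.
 * If the segment was clean, the clean-segment counters are decremented
 * so that it is no longer considered allocatable.
 */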
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr, KM_USER0);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr, KM_USER0);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr, KM_USER0);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

 out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);

 out:
	return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr, KM_USER0);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
	brelse(header_bh);
 out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies the usage information of
 * up to @nsi segments, starting at @segnum, into the @buf array.
 *
 * Return Value: On success, the number of segment usage entries stored
 * in @buf is returned.  On error, one of the following negative error
 * codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
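
/*
 * The information gathered above is presumably what the
 * NILFS_IOCTL_GET_SUINFO ioctl hands out to userland tools such as the
 * cleaner daemon.  Note that the ACTIVE flag is masked out of the
 * on-disk flags and recomputed from the runtime segment state.
 */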

/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}