quota_tree.c

/*
 * vfsv0 quota IO operations on file
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>

#include <asm/byteorder.h>

#include "quota_tree.h"

MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");

#define __QUOTA_QT_PARANOIA

static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
        unsigned int epb = info->dqi_usable_bs >> 2;

        depth = info->dqi_qtree_depth - depth - 1;
        while (depth--)
                id /= epb;
        return id % epb;
}
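
/*
 * Illustration (assumed values, not taken from this file): with
 * dqi_usable_bs == 1024 there are epb = 1024 >> 2 = 256 references per
 * tree block, and with dqi_qtree_depth == 4 the id 70000 decomposes as
 *
 *      get_index(info, 70000, 0) == (70000 / 256^3) % 256 == 0
 *      get_index(info, 70000, 1) == (70000 / 256^2) % 256 == 1
 *      get_index(info, 70000, 2) == (70000 / 256)   % 256 == 17
 *      get_index(info, 70000, 3) ==  70000           % 256 == 112
 *
 * i.e. each tree level consumes one base-epb "digit" of the id, most
 * significant digit at the root.
 */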

/* Number of entries in one block */
static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
        return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
               / info->dqi_entry_size;
}

static char *getdqbuf(size_t size)
{
        char *buf = kmalloc(size, GFP_NOFS);

        if (!buf)
                printk(KERN_WARNING
                       "VFS: Not enough memory for quota buffers.\n");
        return buf;
}

static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
        struct super_block *sb = info->dqi_sb;

        memset(buf, 0, info->dqi_usable_bs);
        return sb->s_op->quota_read(sb, info->dqi_type, buf,
                                    info->dqi_usable_bs,
                                    blk << info->dqi_blocksize_bits);
}

static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
        struct super_block *sb = info->dqi_sb;
        ssize_t ret;

        ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
                                    info->dqi_usable_bs,
                                    blk << info->dqi_blocksize_bits);
        if (ret != info->dqi_usable_bs) {
                quota_error(sb, "dquota write failed");
                if (ret >= 0)
                        ret = -EIO;
        }
        return ret;
}
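
/*
 * read_blk()/write_blk() address the quota file in units of whole blocks:
 * block blk starts at byte offset blk << dqi_blocksize_bits (e.g. block 5
 * with 10-bit blocks starts at 5120).  Only dqi_usable_bs bytes of each
 * block are transferred; presumably formats that reserve trailing space in
 * every block set dqi_usable_bs below the full block size.
 */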

/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        int ret, blk;

        if (!buf)
                return -ENOMEM;
        if (info->dqi_free_blk) {
                blk = info->dqi_free_blk;
                ret = read_blk(info, blk, buf);
                if (ret < 0)
                        goto out_buf;
                info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
        } else {
                memset(buf, 0, info->dqi_usable_bs);
                /* Assure block allocation... */
                ret = write_blk(info, info->dqi_blocks, buf);
                if (ret < 0)
                        goto out_buf;
                blk = info->dqi_blocks++;
        }
        mark_info_dirty(info->dqi_sb, info->dqi_type);
        ret = blk;
out_buf:
        kfree(buf);
        return ret;
}

/* Insert empty block to the list */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        int err;

        dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
        dh->dqdh_prev_free = cpu_to_le32(0);
        dh->dqdh_entries = cpu_to_le16(0);
        err = write_blk(info, blk, buf);
        if (err < 0)
                return err;
        info->dqi_free_blk = blk;
        mark_info_dirty(info->dqi_sb, info->dqi_type);
        return 0;
}
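
/*
 * get_free_dqblk()/put_free_dqblk() maintain a singly linked list of
 * completely unused blocks: dqi_free_blk points at the head and each free
 * block stores its successor in dqdh_next_free.  Allocation pops the head
 * (growing the file when the list is empty), freeing pushes the block back.
 */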

/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
                               uint blk)
{
        char *tmpbuf = getdqbuf(info->dqi_usable_bs);
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        uint nextblk = le32_to_cpu(dh->dqdh_next_free);
        uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
        int err;

        if (!tmpbuf)
                return -ENOMEM;
        if (nextblk) {
                err = read_blk(info, nextblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
                ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
                                                        dh->dqdh_prev_free;
                err = write_blk(info, nextblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
        }
        if (prevblk) {
                err = read_blk(info, prevblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
                ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
                                                        dh->dqdh_next_free;
                err = write_blk(info, prevblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
        } else {
                info->dqi_free_entry = nextblk;
                mark_info_dirty(info->dqi_sb, info->dqi_type);
        }
        kfree(tmpbuf);
        dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
        /* No matter whether the write succeeds, the block is out of the list */
        if (write_blk(info, blk, buf) < 0)
                quota_error(info->dqi_sb, "Can't write block (%u) "
                            "with free entries", blk);
        return 0;
out_buf:
        kfree(tmpbuf);
        return err;
}

/* Insert given block to the beginning of list with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
                               uint blk)
{
        char *tmpbuf = getdqbuf(info->dqi_usable_bs);
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        int err;

        if (!tmpbuf)
                return -ENOMEM;
        dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
        dh->dqdh_prev_free = cpu_to_le32(0);
        err = write_blk(info, blk, buf);
        if (err < 0)
                goto out_buf;
        if (info->dqi_free_entry) {
                err = read_blk(info, info->dqi_free_entry, tmpbuf);
                if (err < 0)
                        goto out_buf;
                ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
                                                        cpu_to_le32(blk);
                err = write_blk(info, info->dqi_free_entry, tmpbuf);
                if (err < 0)
                        goto out_buf;
        }
        kfree(tmpbuf);
        info->dqi_free_entry = blk;
        mark_info_dirty(info->dqi_sb, info->dqi_type);
        return 0;
out_buf:
        kfree(tmpbuf);
        return err;
}
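
/*
 * Data blocks that still have room for more entries are kept on a second,
 * doubly linked list: dqi_free_entry points at the head and the blocks are
 * chained through dqdh_next_free/dqdh_prev_free in their headers.
 * remove_free_dqentry() unlinks a block (when it becomes full or is freed
 * entirely), insert_free_dqentry() pushes a block onto the head.
 */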

/* Is the entry in the block free? */
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
        int i;

        for (i = 0; i < info->dqi_entry_size; i++)
                if (disk[i])
                        return 0;
        return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);

/* Find space for dquot */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
                              struct dquot *dquot, int *err)
{
        uint blk, i;
        struct qt_disk_dqdbheader *dh;
        char *buf = getdqbuf(info->dqi_usable_bs);
        char *ddquot;

        *err = 0;
        if (!buf) {
                *err = -ENOMEM;
                return 0;
        }
        dh = (struct qt_disk_dqdbheader *)buf;
        if (info->dqi_free_entry) {
                blk = info->dqi_free_entry;
                *err = read_blk(info, blk, buf);
                if (*err < 0)
                        goto out_buf;
        } else {
                blk = get_free_dqblk(info);
                if ((int)blk < 0) {
                        *err = blk;
                        kfree(buf);
                        return 0;
                }
                memset(buf, 0, info->dqi_usable_bs);
                /* This is enough as the block is already zeroed and the entry
                 * list is empty... */
                info->dqi_free_entry = blk;
                mark_info_dirty(dquot->dq_sb, dquot->dq_type);
        }
        /* Block will be full? */
        if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
                *err = remove_free_dqentry(info, buf, blk);
                if (*err < 0) {
                        quota_error(dquot->dq_sb, "Can't remove block (%u) "
                                    "from entry free list", blk);
                        goto out_buf;
                }
        }
        le16_add_cpu(&dh->dqdh_entries, 1);
        /* Find free structure in block */
        ddquot = buf + sizeof(struct qt_disk_dqdbheader);
        for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
                if (qtree_entry_unused(info, ddquot))
                        break;
                ddquot += info->dqi_entry_size;
        }
#ifdef __QUOTA_QT_PARANOIA
        if (i == qtree_dqstr_in_blk(info)) {
                quota_error(dquot->dq_sb, "Data block full but it shouldn't");
                *err = -EIO;
                goto out_buf;
        }
#endif
        *err = write_blk(info, blk, buf);
        if (*err < 0) {
                quota_error(dquot->dq_sb, "Can't write quota data block %u",
                            blk);
                goto out_buf;
        }
        dquot->dq_off = (blk << info->dqi_blocksize_bits) +
                        sizeof(struct qt_disk_dqdbheader) +
                        i * info->dqi_entry_size;
        kfree(buf);
        return blk;
out_buf:
        kfree(buf);
        return 0;
}
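
/*
 * The dq_off value computed above is plain layout arithmetic: a data block
 * starts with a struct qt_disk_dqdbheader and is followed by
 * qtree_dqstr_in_blk(info) fixed-size entries, so entry i of block blk
 * lives at
 *
 *      (blk << dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader)
 *              + i * dqi_entry_size
 *
 * Purely illustrative example (header and entry sizes assumed, not taken
 * from any particular format): with 1024-byte blocks, a 16-byte header and
 * 72-byte entries, entry 3 of block 5 would sit at 5120 + 16 + 216 = 5352.
 */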

/* Insert reference to structure into the trie */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
                          uint *treeblk, int depth)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        int ret = 0, newson = 0, newact = 0;
        __le32 *ref;
        uint newblk;

        if (!buf)
                return -ENOMEM;
        if (!*treeblk) {
                ret = get_free_dqblk(info);
                if (ret < 0)
                        goto out_buf;
                *treeblk = ret;
                memset(buf, 0, info->dqi_usable_bs);
                newact = 1;
        } else {
                ret = read_blk(info, *treeblk, buf);
                if (ret < 0) {
                        quota_error(dquot->dq_sb, "Can't read tree quota "
                                    "block %u", *treeblk);
                        goto out_buf;
                }
        }
        ref = (__le32 *)buf;
        newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
        if (!newblk)
                newson = 1;
        if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
                if (newblk) {
                        quota_error(dquot->dq_sb, "Inserting already present "
                                    "quota entry (block %u)",
                                    le32_to_cpu(ref[get_index(info,
                                                dquot->dq_id, depth)]));
                        ret = -EIO;
                        goto out_buf;
                }
#endif
                newblk = find_free_dqentry(info, dquot, &ret);
        } else {
                ret = do_insert_tree(info, dquot, &newblk, depth + 1);
        }
        if (newson && ret >= 0) {
                ref[get_index(info, dquot->dq_id, depth)] =
                                                        cpu_to_le32(newblk);
                ret = write_blk(info, *treeblk, buf);
        } else if (newact && ret < 0) {
                put_free_dqblk(info, buf, *treeblk);
        }
out_buf:
        kfree(buf);
        return ret;
}

/* Wrapper for inserting quota structure into tree */
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
                                 struct dquot *dquot)
{
        int tmp = QT_TREEOFF;

        return do_insert_tree(info, dquot, &tmp, 0);
}
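
/*
 * Resulting on-disk structure: the tree root is the block at QT_TREEOFF,
 * every interior block is an array of __le32 block references indexed by
 * get_index(), and blocks on the last level are data blocks holding a
 * qt_disk_dqdbheader followed by packed dquot entries.  A zero reference
 * means "nothing here yet"; do_insert_tree() allocates missing interior
 * blocks on the way down and gives a newly allocated block back to the
 * free list if the insert below it fails.
 */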

/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        int type = dquot->dq_type;
        struct super_block *sb = dquot->dq_sb;
        ssize_t ret;
        char *ddquot = getdqbuf(info->dqi_entry_size);

        if (!ddquot)
                return -ENOMEM;

        /* dq_off is guarded by dqio_mutex */
        if (!dquot->dq_off) {
                ret = dq_insert_tree(info, dquot);
                if (ret < 0) {
                        quota_error(sb, "Error %zd occurred while creating "
                                    "quota", ret);
                        kfree(ddquot);
                        return ret;
                }
        }
        spin_lock(&dq_data_lock);
        info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
        spin_unlock(&dq_data_lock);
        ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
                                    dquot->dq_off);
        if (ret != info->dqi_entry_size) {
                quota_error(sb, "dquota write failed");
                if (ret >= 0)
                        ret = -ENOSPC;
        } else {
                ret = 0;
        }
        dqstats_inc(DQST_WRITES);
        kfree(ddquot);
        return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);

/* Free dquot entry in data block */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
                        uint blk)
{
        struct qt_disk_dqdbheader *dh;
        char *buf = getdqbuf(info->dqi_usable_bs);
        int ret = 0;

        if (!buf)
                return -ENOMEM;
        if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
                quota_error(dquot->dq_sb, "Quota structure has offset to "
                            "other block (%u) than it should (%u)", blk,
                            (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
                goto out_buf;
        }
        ret = read_blk(info, blk, buf);
        if (ret < 0) {
                quota_error(dquot->dq_sb, "Can't read quota data block %u",
                            blk);
                goto out_buf;
        }
        dh = (struct qt_disk_dqdbheader *)buf;
        le16_add_cpu(&dh->dqdh_entries, -1);
        if (!le16_to_cpu(dh->dqdh_entries)) {   /* Block got free? */
                ret = remove_free_dqentry(info, buf, blk);
                if (ret >= 0)
                        ret = put_free_dqblk(info, buf, blk);
                if (ret < 0) {
                        quota_error(dquot->dq_sb, "Can't move quota data block "
                                    "(%u) to free list", blk);
                        goto out_buf;
                }
        } else {
                memset(buf +
                       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
                       0, info->dqi_entry_size);
                if (le16_to_cpu(dh->dqdh_entries) ==
                    qtree_dqstr_in_blk(info) - 1) {
                        /* Insert will write block itself */
                        ret = insert_free_dqentry(info, buf, blk);
                        if (ret < 0) {
                                quota_error(dquot->dq_sb, "Can't insert quota "
                                            "data block (%u) to free entry list",
                                            blk);
                                goto out_buf;
                        }
                } else {
                        ret = write_blk(info, blk, buf);
                        if (ret < 0) {
                                quota_error(dquot->dq_sb, "Can't write quota "
                                            "data block %u", blk);
                                goto out_buf;
                        }
                }
        }
        dquot->dq_off = 0;      /* Quota is now unattached */
out_buf:
        kfree(buf);
        return ret;
}

/* Remove reference to dquot from tree */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
                       uint *blk, int depth)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        int ret = 0;
        uint newblk;
        __le32 *ref = (__le32 *)buf;

        if (!buf)
                return -ENOMEM;
        ret = read_blk(info, *blk, buf);
        if (ret < 0) {
                quota_error(dquot->dq_sb, "Can't read quota data block %u",
                            *blk);
                goto out_buf;
        }
        newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
        if (depth == info->dqi_qtree_depth - 1) {
                ret = free_dqentry(info, dquot, newblk);
                newblk = 0;
        } else {
                ret = remove_tree(info, dquot, &newblk, depth + 1);
        }
        if (ret >= 0 && !newblk) {
                int i;

                ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
                /* Block got empty? */
                for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
                        ;
                /* Don't put the root block into the free block list */
                if (i == (info->dqi_usable_bs >> 2) &&
                    *blk != QT_TREEOFF) {
                        put_free_dqblk(info, buf, *blk);
                        *blk = 0;
                } else {
                        ret = write_blk(info, *blk, buf);
                        if (ret < 0)
                                quota_error(dquot->dq_sb,
                                            "Can't write quota tree block %u",
                                            *blk);
                }
        }
out_buf:
        kfree(buf);
        return ret;
}
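
/*
 * remove_tree() is the inverse of do_insert_tree(): once the leaf entry is
 * freed it clears the reference in each interior block on the way back up
 * and, when an interior block contains nothing but zero references, hands
 * it back to the free block list - except for the root at QT_TREEOFF,
 * which always stays allocated.
 */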

/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        uint tmp = QT_TREEOFF;

        if (!dquot->dq_off)     /* Even not allocated? */
                return 0;
        return remove_tree(info, dquot, &tmp, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);

/* Find entry in block */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
                                 struct dquot *dquot, uint blk)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        loff_t ret = 0;
        int i;
        char *ddquot;

        if (!buf)
                return -ENOMEM;
        ret = read_blk(info, blk, buf);
        if (ret < 0) {
                quota_error(dquot->dq_sb, "Can't read quota tree "
                            "block %u", blk);
                goto out_buf;
        }
        ddquot = buf + sizeof(struct qt_disk_dqdbheader);
        for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
                if (info->dqi_ops->is_id(ddquot, dquot))
                        break;
                ddquot += info->dqi_entry_size;
        }
        if (i == qtree_dqstr_in_blk(info)) {
                quota_error(dquot->dq_sb, "Quota for id %u referenced "
                            "but not present", dquot->dq_id);
                ret = -EIO;
                goto out_buf;
        } else {
                ret = (blk << info->dqi_blocksize_bits) +
                      sizeof(struct qt_disk_dqdbheader) +
                      i * info->dqi_entry_size;
        }
out_buf:
        kfree(buf);
        return ret;
}

/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
                                struct dquot *dquot, uint blk, int depth)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        loff_t ret = 0;
        __le32 *ref = (__le32 *)buf;

        if (!buf)
                return -ENOMEM;
        ret = read_blk(info, blk, buf);
        if (ret < 0) {
                quota_error(dquot->dq_sb, "Can't read quota tree block %u",
                            blk);
                goto out_buf;
        }
        ret = 0;
        blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
        if (!blk)       /* No reference? */
                goto out_buf;
        if (depth < info->dqi_qtree_depth - 1)
                ret = find_tree_dqentry(info, dquot, blk, depth + 1);
        else
                ret = find_block_dqentry(info, dquot, blk);
out_buf:
        kfree(buf);
        return ret;
}

/* Find entry for given id in the tree - wrapper function */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
                                  struct dquot *dquot)
{
        return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}
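
/*
 * Lookup mirrors insertion: find_tree_dqentry() follows one reference per
 * level starting at QT_TREEOFF, and find_block_dqentry() then scans the
 * final data block for the entry matching the id (via the format-specific
 * is_id() callback).  The result is the byte offset of the entry, 0 when
 * no reference exists for the id, or a negative errno.
 */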

int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        int type = dquot->dq_type;
        struct super_block *sb = dquot->dq_sb;
        loff_t offset;
        char *ddquot;
        int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
        /* Invalidated quota? */
        if (!sb_dqopt(dquot->dq_sb)->files[type]) {
                quota_error(sb, "Quota invalidated while reading!");
                return -EIO;
        }
#endif
        /* Do we know offset of the dquot entry in the quota file? */
        if (!dquot->dq_off) {
                offset = find_dqentry(info, dquot);
                if (offset <= 0) {      /* Entry not present? */
                        if (offset < 0)
                                quota_error(sb, "Can't read quota structure "
                                            "for id %u", dquot->dq_id);
                        dquot->dq_off = 0;
                        set_bit(DQ_FAKE_B, &dquot->dq_flags);
                        memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
                        ret = offset;
                        goto out;
                }
                dquot->dq_off = offset;
        }
        ddquot = getdqbuf(info->dqi_entry_size);
        if (!ddquot)
                return -ENOMEM;
        ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
                                   dquot->dq_off);
        if (ret != info->dqi_entry_size) {
                if (ret >= 0)
                        ret = -EIO;
                quota_error(sb, "Error while reading quota structure for id %u",
                            dquot->dq_id);
                set_bit(DQ_FAKE_B, &dquot->dq_flags);
                memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
                kfree(ddquot);
                goto out;
        }
        spin_lock(&dq_data_lock);
        info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
        if (!dquot->dq_dqb.dqb_bhardlimit &&
            !dquot->dq_dqb.dqb_bsoftlimit &&
            !dquot->dq_dqb.dqb_ihardlimit &&
            !dquot->dq_dqb.dqb_isoftlimit)
                set_bit(DQ_FAKE_B, &dquot->dq_flags);
        spin_unlock(&dq_data_lock);
        kfree(ddquot);
out:
        dqstats_inc(DQST_READS);
        return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);
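
/*
 * Note that a dquot whose entry is missing from the file, or whose limits
 * are all zero, gets DQ_FAKE_B set above: it exists only in memory, and
 * qtree_release_dquot() below removes it from the tree again once it no
 * longer accounts any space or inodes.
 */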

/*
 * Check whether dquot should not be deleted. We know we are
 * the only one operating on dquot (thanks to dq_lock)
 */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
            !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
                return qtree_delete_dquot(info, dquot);
        return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);