quota_tree.c

/*
 * vfsv0 quota IO operations on file
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>

#include <asm/byteorder.h>

#include "quota_tree.h"

MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");

#define __QUOTA_QT_PARANOIA
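
/*
 * Layout overview (descriptive summary of what the code below assumes):
 *
 * The quota file is treated as an array of blocks, each
 * info->dqi_usable_bs bytes long.  Interior tree blocks are arrays of
 * __le32 block references indexed by get_index(); a zero reference means
 * "not allocated".  The root of the tree lives at block QT_TREEOFF.
 * Leaf (data) blocks start with a struct qt_disk_dqdbheader followed by
 * fixed-size entries of info->dqi_entry_size bytes.
 *
 * Two free lists are kept in the info structure: dqi_free_blk heads a
 * singly linked list (through dqdh_next_free) of completely free blocks,
 * and dqi_free_entry heads a doubly linked list
 * (dqdh_next_free/dqdh_prev_free) of data blocks that still have room
 * for more entries.
 */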
static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
        unsigned int epb = info->dqi_usable_bs >> 2;

        depth = info->dqi_qtree_depth - depth - 1;
        while (depth--)
                id /= epb;
        return id % epb;
}
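
/*
 * Worked example (illustrative only, assuming the usual vfsv0/vfsv2
 * parameters of dqi_usable_bs = 1024 and dqi_qtree_depth = 4): each tree
 * block then holds epb = 1024 / 4 = 256 references, and id 70000 is
 * resolved through the indices
 *
 *      get_index(info, 70000, 0) = (70000 / 256^3) % 256 = 0
 *      get_index(info, 70000, 1) = (70000 / 256^2) % 256 = 1
 *      get_index(info, 70000, 2) = (70000 / 256)   % 256 = 17
 *      get_index(info, 70000, 3) =  70000          % 256 = 112
 *
 * i.e. the id is read as a base-epb number, most significant digit first.
 */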
/* Number of entries in one block */
static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
        return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
               / info->dqi_entry_size;
}
static char *getdqbuf(size_t size)
{
        char *buf = kmalloc(size, GFP_NOFS);
        if (!buf)
                printk(KERN_WARNING
                       "VFS: Not enough memory for quota buffers.\n");
        return buf;
}

static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
        struct super_block *sb = info->dqi_sb;

        memset(buf, 0, info->dqi_usable_bs);
        return sb->s_op->quota_read(sb, info->dqi_type, buf,
               info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
}

static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
        struct super_block *sb = info->dqi_sb;
        ssize_t ret;

        ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
              info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
        if (ret != info->dqi_usable_bs) {
                q_warn(KERN_WARNING "VFS: dquota write failed on "
                        "dev %s\n", sb->s_id);
                if (ret >= 0)
                        ret = -EIO;
        }
        return ret;
}
/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        int ret, blk;

        if (!buf)
                return -ENOMEM;
        if (info->dqi_free_blk) {
                blk = info->dqi_free_blk;
                ret = read_blk(info, blk, buf);
                if (ret < 0)
                        goto out_buf;
                info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
        } else {
                memset(buf, 0, info->dqi_usable_bs);
                /* Assure block allocation... */
                ret = write_blk(info, info->dqi_blocks, buf);
                if (ret < 0)
                        goto out_buf;
                blk = info->dqi_blocks++;
        }
        mark_info_dirty(info->dqi_sb, info->dqi_type);
        ret = blk;
out_buf:
        kfree(buf);
        return ret;
}

/* Insert empty block to the list */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        int err;

        dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
        dh->dqdh_prev_free = cpu_to_le32(0);
        dh->dqdh_entries = cpu_to_le16(0);
        err = write_blk(info, blk, buf);
        if (err < 0)
                return err;
        info->dqi_free_blk = blk;
        mark_info_dirty(info->dqi_sb, info->dqi_type);
        return 0;
}
/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
                               uint blk)
{
        char *tmpbuf = getdqbuf(info->dqi_usable_bs);
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        uint nextblk = le32_to_cpu(dh->dqdh_next_free);
        uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
        int err;

        if (!tmpbuf)
                return -ENOMEM;
        if (nextblk) {
                err = read_blk(info, nextblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
                ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
                                                        dh->dqdh_prev_free;
                err = write_blk(info, nextblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
        }
        if (prevblk) {
                err = read_blk(info, prevblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
                ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
                                                        dh->dqdh_next_free;
                err = write_blk(info, prevblk, tmpbuf);
                if (err < 0)
                        goto out_buf;
        } else {
                info->dqi_free_entry = nextblk;
                mark_info_dirty(info->dqi_sb, info->dqi_type);
        }
        kfree(tmpbuf);
        dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
        /* No matter whether the write succeeds, the block is out of the list */
        if (write_blk(info, blk, buf) < 0)
                q_warn(KERN_ERR
                       "VFS: Can't write block (%u) with free entries.\n",
                       blk);
        return 0;
out_buf:
        kfree(tmpbuf);
        return err;
}
/* Insert given block at the beginning of the list of blocks with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
                               uint blk)
{
        char *tmpbuf = getdqbuf(info->dqi_usable_bs);
        struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
        int err;

        if (!tmpbuf)
                return -ENOMEM;
        dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
        dh->dqdh_prev_free = cpu_to_le32(0);
        err = write_blk(info, blk, buf);
        if (err < 0)
                goto out_buf;
        if (info->dqi_free_entry) {
                err = read_blk(info, info->dqi_free_entry, tmpbuf);
                if (err < 0)
                        goto out_buf;
                ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
                                                        cpu_to_le32(blk);
                err = write_blk(info, info->dqi_free_entry, tmpbuf);
                if (err < 0)
                        goto out_buf;
        }
        kfree(tmpbuf);
        info->dqi_free_entry = blk;
        mark_info_dirty(info->dqi_sb, info->dqi_type);
        return 0;
out_buf:
        kfree(tmpbuf);
        return err;
}
/* Is the entry in the block free? */
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
        int i;

        for (i = 0; i < info->dqi_entry_size; i++)
                if (disk[i])
                        return 0;
        return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);
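
/*
 * Note (added for clarity): an entry consisting entirely of zero bytes is
 * what this code treats as "unused", both here and in find_free_dqentry()
 * below.  A quota format using this helper therefore has to make sure a
 * valid, in-use on-disk entry is never all zeroes.
 */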
/* Find space for dquot */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
                              struct dquot *dquot, int *err)
{
        uint blk, i;
        struct qt_disk_dqdbheader *dh;
        char *buf = getdqbuf(info->dqi_usable_bs);
        char *ddquot;

        *err = 0;
        if (!buf) {
                *err = -ENOMEM;
                return 0;
        }
        dh = (struct qt_disk_dqdbheader *)buf;
        if (info->dqi_free_entry) {
                blk = info->dqi_free_entry;
                *err = read_blk(info, blk, buf);
                if (*err < 0)
                        goto out_buf;
        } else {
                blk = get_free_dqblk(info);
                if ((int)blk < 0) {
                        *err = blk;
                        kfree(buf);
                        return 0;
                }
                memset(buf, 0, info->dqi_usable_bs);
                /* This is enough as the block is already zeroed and the entry
                 * list is empty... */
                info->dqi_free_entry = blk;
                mark_info_dirty(dquot->dq_sb, dquot->dq_type);
        }
        /* Block will be full? */
        if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
                *err = remove_free_dqentry(info, buf, blk);
                if (*err < 0) {
                        q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't "
                               "remove block (%u) from entry free list.\n",
                               blk);
                        goto out_buf;
                }
        }
        le16_add_cpu(&dh->dqdh_entries, 1);
        /* Find free structure in block */
        ddquot = buf + sizeof(struct qt_disk_dqdbheader);
        for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
                if (qtree_entry_unused(info, ddquot))
                        break;
                ddquot += info->dqi_entry_size;
        }
#ifdef __QUOTA_QT_PARANOIA
        if (i == qtree_dqstr_in_blk(info)) {
                printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
                       "but it shouldn't.\n");
                *err = -EIO;
                goto out_buf;
        }
#endif
        *err = write_blk(info, blk, buf);
        if (*err < 0) {
                q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
                       "data block %u.\n", blk);
                goto out_buf;
        }
        dquot->dq_off = (blk << info->dqi_blocksize_bits) +
                        sizeof(struct qt_disk_dqdbheader) +
                        i * info->dqi_entry_size;
        kfree(buf);
        return blk;
out_buf:
        kfree(buf);
        return 0;
}
/* Insert reference to structure into the trie */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
                          uint *treeblk, int depth)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        int ret = 0, newson = 0, newact = 0;
        __le32 *ref;
        uint newblk;

        if (!buf)
                return -ENOMEM;
        if (!*treeblk) {
                ret = get_free_dqblk(info);
                if (ret < 0)
                        goto out_buf;
                *treeblk = ret;
                memset(buf, 0, info->dqi_usable_bs);
                newact = 1;
        } else {
                ret = read_blk(info, *treeblk, buf);
                if (ret < 0) {
                        q_warn(KERN_ERR "VFS: Can't read tree quota block "
                               "%u.\n", *treeblk);
                        goto out_buf;
                }
        }
        ref = (__le32 *)buf;
        newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
        if (!newblk)
                newson = 1;
        if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
                if (newblk) {
                        printk(KERN_ERR "VFS: Inserting already present quota "
                               "entry (block %u).\n",
                               le32_to_cpu(ref[get_index(info,
                                           dquot->dq_id, depth)]));
                        ret = -EIO;
                        goto out_buf;
                }
#endif
                newblk = find_free_dqentry(info, dquot, &ret);
        } else {
                ret = do_insert_tree(info, dquot, &newblk, depth+1);
        }
        if (newson && ret >= 0) {
                ref[get_index(info, dquot->dq_id, depth)] =
                                                        cpu_to_le32(newblk);
                ret = write_blk(info, *treeblk, buf);
        } else if (newact && ret < 0) {
                put_free_dqblk(info, buf, *treeblk);
        }
out_buf:
        kfree(buf);
        return ret;
}

/* Wrapper for inserting quota structure into tree */
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
                                 struct dquot *dquot)
{
        int tmp = QT_TREEOFF;
        return do_insert_tree(info, dquot, &tmp, 0);
}
/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        int type = dquot->dq_type;
        struct super_block *sb = dquot->dq_sb;
        ssize_t ret;
        char *ddquot = getdqbuf(info->dqi_entry_size);

        if (!ddquot)
                return -ENOMEM;

        /* dq_off is guarded by dqio_mutex */
        if (!dquot->dq_off) {
                ret = dq_insert_tree(info, dquot);
                if (ret < 0) {
                        q_warn(KERN_ERR "VFS: Error %zd occurred while "
                               "creating quota.\n", ret);
                        kfree(ddquot);
                        return ret;
                }
        }
        spin_lock(&dq_data_lock);
        info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
        spin_unlock(&dq_data_lock);
        ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
                                    dquot->dq_off);
        if (ret != info->dqi_entry_size) {
                q_warn(KERN_WARNING "VFS: dquota write failed on dev %s\n",
                       sb->s_id);
                if (ret >= 0)
                        ret = -ENOSPC;
        } else {
                ret = 0;
        }
        dqstats_inc(DQST_WRITES);
        kfree(ddquot);

        return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);
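
/*
 * Summary of the write path above (comment added for clarity): on the first
 * write of a dquot, dq_insert_tree() walks (and, where needed, allocates)
 * the tree blocks for dquot->dq_id and find_free_dqentry() picks a slot in
 * a data block, recording its file offset in dquot->dq_off; subsequent
 * writes simply convert the in-memory dquot with dqi_ops->mem2disk_dqblk()
 * and rewrite dqi_entry_size bytes at that offset.
 */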
/* Free dquot entry in data block */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
                        uint blk)
{
        struct qt_disk_dqdbheader *dh;
        char *buf = getdqbuf(info->dqi_usable_bs);
        int ret = 0;

        if (!buf)
                return -ENOMEM;
        if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
                q_warn(KERN_ERR "VFS: Quota structure has offset to other "
                       "block (%u) than it should (%u).\n", blk,
                       (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
                goto out_buf;
        }
        ret = read_blk(info, blk, buf);
        if (ret < 0) {
                q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
                goto out_buf;
        }
        dh = (struct qt_disk_dqdbheader *)buf;
        le16_add_cpu(&dh->dqdh_entries, -1);
        if (!le16_to_cpu(dh->dqdh_entries)) {   /* Block got free? */
                ret = remove_free_dqentry(info, buf, blk);
                if (ret >= 0)
                        ret = put_free_dqblk(info, buf, blk);
                if (ret < 0) {
                        q_warn(KERN_ERR "VFS: Can't move quota data block (%u) "
                               "to free list.\n", blk);
                        goto out_buf;
                }
        } else {
                memset(buf +
                       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
                       0, info->dqi_entry_size);
                if (le16_to_cpu(dh->dqdh_entries) ==
                    qtree_dqstr_in_blk(info) - 1) {
                        /* Insert will write block itself */
                        ret = insert_free_dqentry(info, buf, blk);
                        if (ret < 0) {
                                q_warn(KERN_ERR "VFS: Can't insert quota data "
                                       "block (%u) to free entry list.\n", blk);
                                goto out_buf;
                        }
                } else {
                        ret = write_blk(info, blk, buf);
                        if (ret < 0) {
                                q_warn(KERN_ERR "VFS: Can't write quota data "
                                       "block %u\n", blk);
                                goto out_buf;
                        }
                }
        }
        dquot->dq_off = 0;      /* Quota is now unattached */
out_buf:
        kfree(buf);
        return ret;
}
/* Remove reference to dquot from tree */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
                       uint *blk, int depth)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        int ret = 0;
        uint newblk;
        __le32 *ref = (__le32 *)buf;

        if (!buf)
                return -ENOMEM;
        ret = read_blk(info, *blk, buf);
        if (ret < 0) {
                q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
                goto out_buf;
        }
        newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
        if (depth == info->dqi_qtree_depth - 1) {
                ret = free_dqentry(info, dquot, newblk);
                newblk = 0;
        } else {
                ret = remove_tree(info, dquot, &newblk, depth+1);
        }
        if (ret >= 0 && !newblk) {
                int i;

                ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
                /* Block got empty? */
                for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
                        ;
                /* Don't put the root block into the free block list */
                if (i == (info->dqi_usable_bs >> 2)
                    && *blk != QT_TREEOFF) {
                        put_free_dqblk(info, buf, *blk);
                        *blk = 0;
                } else {
                        ret = write_blk(info, *blk, buf);
                        if (ret < 0)
                                q_warn(KERN_ERR "VFS: Can't write quota tree "
                                       "block %u.\n", *blk);
                }
        }
out_buf:
        kfree(buf);
        return ret;
}

/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        uint tmp = QT_TREEOFF;

        if (!dquot->dq_off)     /* Even not allocated? */
                return 0;
        return remove_tree(info, dquot, &tmp, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);
/* Find entry in block */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
                                 struct dquot *dquot, uint blk)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        loff_t ret = 0;
        int i;
        char *ddquot;

        if (!buf)
                return -ENOMEM;
        ret = read_blk(info, blk, buf);
        if (ret < 0) {
                q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
                goto out_buf;
        }
        ddquot = buf + sizeof(struct qt_disk_dqdbheader);
        for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
                if (info->dqi_ops->is_id(ddquot, dquot))
                        break;
                ddquot += info->dqi_entry_size;
        }
        if (i == qtree_dqstr_in_blk(info)) {
                q_warn(KERN_ERR "VFS: Quota for id %u referenced "
                       "but not present.\n", dquot->dq_id);
                ret = -EIO;
                goto out_buf;
        } else {
                ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
                      qt_disk_dqdbheader) + i * info->dqi_entry_size;
        }
out_buf:
        kfree(buf);
        return ret;
}

/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
                                struct dquot *dquot, uint blk, int depth)
{
        char *buf = getdqbuf(info->dqi_usable_bs);
        loff_t ret = 0;
        __le32 *ref = (__le32 *)buf;

        if (!buf)
                return -ENOMEM;
        ret = read_blk(info, blk, buf);
        if (ret < 0) {
                q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
                goto out_buf;
        }
        ret = 0;
        blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
        if (!blk)       /* No reference? */
                goto out_buf;
        if (depth < info->dqi_qtree_depth - 1)
                ret = find_tree_dqentry(info, dquot, blk, depth+1);
        else
                ret = find_block_dqentry(info, dquot, blk);
out_buf:
        kfree(buf);
        return ret;
}

/* Find entry for given id in the tree - wrapper function */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
                                  struct dquot *dquot)
{
        return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}
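
/*
 * Lookup summary (comment added for clarity): find_dqentry() descends from
 * the root block at QT_TREEOFF, using get_index() to pick the reference at
 * each level; a zero reference at any level means the id has no entry and
 * 0 is returned.  At the leaf level find_block_dqentry() scans the data
 * block with dqi_ops->is_id() and returns the file offset of the matching
 * entry.
 */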
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        int type = dquot->dq_type;
        struct super_block *sb = dquot->dq_sb;
        loff_t offset;
        char *ddquot;
        int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
        /* Invalidated quota? */
        if (!sb_dqopt(dquot->dq_sb)->files[type]) {
                printk(KERN_ERR "VFS: Quota invalidated while reading!\n");
                return -EIO;
        }
#endif
        /* Do we know offset of the dquot entry in the quota file? */
        if (!dquot->dq_off) {
                offset = find_dqentry(info, dquot);
                if (offset <= 0) {      /* Entry not present? */
                        if (offset < 0)
                                q_warn(KERN_ERR "VFS: Can't read quota "
                                       "structure for id %u.\n", dquot->dq_id);
                        dquot->dq_off = 0;
                        set_bit(DQ_FAKE_B, &dquot->dq_flags);
                        memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
                        ret = offset;
                        goto out;
                }
                dquot->dq_off = offset;
        }
        ddquot = getdqbuf(info->dqi_entry_size);
        if (!ddquot)
                return -ENOMEM;
        ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
                                   dquot->dq_off);
        if (ret != info->dqi_entry_size) {
                if (ret >= 0)
                        ret = -EIO;
                q_warn(KERN_ERR "VFS: Error while reading quota "
                       "structure for id %u.\n", dquot->dq_id);
                set_bit(DQ_FAKE_B, &dquot->dq_flags);
                memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
                kfree(ddquot);
                goto out;
        }
        spin_lock(&dq_data_lock);
        info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
        if (!dquot->dq_dqb.dqb_bhardlimit &&
            !dquot->dq_dqb.dqb_bsoftlimit &&
            !dquot->dq_dqb.dqb_ihardlimit &&
            !dquot->dq_dqb.dqb_isoftlimit)
                set_bit(DQ_FAKE_B, &dquot->dq_flags);
        spin_unlock(&dq_data_lock);
        kfree(ddquot);
out:
        dqstats_inc(DQST_READS);
        return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);

/* Check whether dquot should not be deleted. We know we are
 * the only one operating on dquot (thanks to dq_lock) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
        if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
            !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
                return qtree_delete_dquot(info, dquot);
        return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);
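
/*
 * Illustrative usage sketch (not part of this file): a quota format driver
 * such as the vfsv0/vfsv2 code is expected to fill in a struct
 * qtree_mem_dqinfo plus an operations table and then forward its
 * quota_format_ops methods to the qtree_* helpers exported above.  The
 * myfs_* names below are hypothetical; the field names are the ones this
 * file itself dereferences.
 *
 *      static const struct qtree_fmt_operations myfs_qtree_ops = {
 *              .mem2disk_dqblk = myfs_mem2disk_dqblk,  (dquot -> disk entry)
 *              .disk2mem_dqblk = myfs_disk2mem_dqblk,  (disk entry -> dquot)
 *              .is_id          = myfs_is_id,           (entry matches dquot?)
 *      };
 *
 *      static void myfs_setup_qtree(struct qtree_mem_dqinfo *info,
 *                                   struct super_block *sb, int type)
 *      {
 *              info->dqi_sb = sb;
 *              info->dqi_type = type;
 *              info->dqi_blocksize_bits = 10;          (1024-byte blocks)
 *              info->dqi_usable_bs = 1 << info->dqi_blocksize_bits;
 *              info->dqi_qtree_depth = 4;
 *              info->dqi_entry_size = sizeof(struct myfs_disk_dqblk);
 *              info->dqi_ops = &myfs_qtree_ops;
 *              (dqi_blocks, dqi_free_blk and dqi_free_entry are read from
 *              the format's on-disk info block.)
 *      }
 *
 * Its quota_format_ops then typically map .read_dqblk to qtree_read_dquot(),
 * .commit_dqblk to qtree_write_dquot() and .release_dqblk to
 * qtree_release_dquot(), with the file offset cached in dquot->dq_off.
 */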