quota_global.c

/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "quota.h"

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);
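
/* Copy the on-disk global dquot block into the in-memory dquot. Fields that
 * were just set by the administrator (the DQ_LASTSET_B + QIF_* bits) are left
 * untouched so that the local change is not overwritten by disk data. */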
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	/* Update from disk only entries not set by the admin */
	if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
		m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
		m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
	if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
		m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
		m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
		m->dqb_btime = le64_to_cpu(d->dqb_btime);
	if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
		m->dqb_itime = le64_to_cpu(d->dqb_itime);
	OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}
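
/* Convert the in-memory dquot into its on-disk global representation. */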
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	d->dqb_id = cpu_to_le32(dquot->dq_id);
	d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
	d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
	d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
}
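
/* Callback for the qtree code: does the given on-disk entry belong to the
 * dquot we are looking for? Unused entries never match. */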
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	if (qtree_entry_unused(&oinfo->dqi_gi, dp))
		return 0;
	return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};

static int ocfs2_validate_quota_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	struct ocfs2_disk_dqtrailer *dqt =
		ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

	mlog(0, "Validating quota block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running. We know any error is
	 * local to this block.
	 */
	return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}

int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
			   struct buffer_head **bh)
{
	int rc = 0;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
				    ocfs2_validate_quota_block);
	if (rc)
		mlog_errno(rc);

	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}
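
/* Map a logical block of the quota file to its physical block and grab a
 * buffer head for it without reading it from disk - used when the caller is
 * about to overwrite the whole block anyway. */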
static int ocfs2_get_quota_block(struct inode *inode, int block,
				 struct buffer_head **bh)
{
	u64 pblock, pcount;
	int err;

	down_read(&OCFS2_I(inode)->ip_alloc_sem);
	err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	if (err) {
		mlog_errno(err);
		return err;
	}
	*bh = sb_getblk(inode->i_sb, pblock);
	if (!*bh) {
		err = -EIO;
		mlog_errno(err);
	}
	return err;
}

/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;

	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
		bh = NULL;
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
			  const char *data, size_t len, loff_t off)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0, new = 0, ja_type;
	struct buffer_head *bh = NULL;
	handle_t *handle = journal_current_handle();

	if (!handle) {
		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
		     "because transaction was not started.\n",
		     (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
		WARN_ON(1);
		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
	}

	mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
	if (gqinode->i_size < off + len) {
		down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		err = ocfs2_extend_no_holes(gqinode, off + len, off);
		up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		if (err < 0)
			goto out;
		err = ocfs2_simple_size_update(gqinode,
					       oinfo->dqi_gqi_bh,
					       off + len);
		if (err < 0)
			goto out;
		new = 1;
	}
	/* Not rewriting whole block? */
	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
	    !new) {
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
	} else {
		err = ocfs2_get_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
	}
	if (err) {
		mlog_errno(err);
		goto out;	/* must drop i_mutex before returning */
	}
	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(gqinode, bh);
	err = ocfs2_journal_access_dq(handle, gqinode, bh, ja_type);
	if (err < 0) {
		brelse(bh);
		goto out;
	}
	err = ocfs2_journal_dirty(handle, bh);
	brelse(bh);
	if (err < 0)
		goto out;
out:
	if (err) {
		mutex_unlock(&gqinode->i_mutex);
		mlog_errno(err);
		return err;
	}
	gqinode->i_version++;
	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
	mutex_unlock(&gqinode->i_mutex);
	return len;
}
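
/* Take the cluster lock protecting the global quota file. The buffer head of
 * the global quota inode is cached and refcounted, so nested lockers share
 * the same bh; ocfs2_unlock_global_qf() drops the reference again. */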
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	struct inode *gqinode = NULL;
	unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	int status;

	mlog_entry_void();

	/* Read global header */
	gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
			OCFS2_INVALID_SLOT);
	if (!gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
		     type);
		status = -EINVAL;
		goto out_err;
	}
	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;
	oinfo->dqi_gqinode = gqinode;
	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_unlock_global_qf(oinfo, 0);
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;
		mlog_errno(status);
		goto out_err;
	}

	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
					OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);
out_err:
	mlog_exit(status);
	return status;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		if (size >= 0)
			size = -EIO;
		return size;
	}
	return 0;
}

int ocfs2_global_write_info(struct super_block *sb, int type)
{
	int err;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 1);
	if (err < 0)
		return err;
	err = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(info, 1);
	return err;
}

/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
	int err, err2, ex = 0;
	struct ocfs2_mem_dqinfo *info =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 0);
	if (err < 0)
		goto out;
	err = qtree_read_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	OCFS2_DQUOT(dquot)->dq_use_count++;
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	if (!dquot->dq_off) {	/* No real quota entry? */
		/* Upgrade to exclusive lock for allocation */
		ocfs2_qinfo_unlock(info, 0);
		err = ocfs2_qinfo_lock(info, 1);
		if (err < 0)
			goto out;	/* the shared lock is already dropped */
		ex = 1;
	}
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
		err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
		if (!err)
			err = err2;
	}
out_qlock:
	if (ex)
		ocfs2_qinfo_unlock(info, 1);
	else
		ocfs2_qinfo_unlock(info, 0);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
	int err, err2;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_global_disk_dqblk dqblk;
	s64 spacechange, inodechange;
	time_t olditime, oldbtime;

	err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				   sizeof(struct ocfs2_global_disk_dqblk),
				   dquot->dq_off);
	if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
		if (err >= 0) {
			mlog(ML_ERROR, "Short read from global quota file "
				       "(%u read)\n", err);
			err = -EIO;
		}
		goto out;
	}
	/* Update space and inode usage. Also pick up the other fields from
	 * the global quota file so that we don't overwrite any changes made
	 * there. */
	spin_lock(&dq_data_lock);
	spacechange = dquot->dq_dqb.dqb_curspace -
					OCFS2_DQUOT(dquot)->dq_origspace;
	inodechange = dquot->dq_dqb.dqb_curinodes -
					OCFS2_DQUOT(dquot)->dq_originodes;
	olditime = dquot->dq_dqb.dqb_itime;
	oldbtime = dquot->dq_dqb.dqb_btime;
	ocfs2_global_disk2memdqb(dquot, &dqblk);
	mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
	     dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
	     dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curspace += spacechange;
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Set the space grace time properly... */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
		    oldbtime > 0) {
			if (dquot->dq_dqb.dqb_btime > 0)
				dquot->dq_dqb.dqb_btime =
					min(dquot->dq_dqb.dqb_btime, oldbtime);
			else
				dquot->dq_dqb.dqb_btime = oldbtime;
		}
	} else {
		dquot->dq_dqb.dqb_btime = 0;
		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	}
	/* Set the inode grace time properly... */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
		    olditime > 0) {
			if (dquot->dq_dqb.dqb_itime > 0)
				dquot->dq_dqb.dqb_itime =
					min(dquot->dq_dqb.dqb_itime, olditime);
			else
				dquot->dq_dqb.dqb_itime = olditime;
		}
	} else {
		dquot->dq_dqb.dqb_itime = 0;
		clear_bit(DQ_INODES_B, &dquot->dq_flags);
	}
	/* All information is properly updated, clear the flags */
	__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	spin_unlock(&dq_data_lock);
	err = ocfs2_qinfo_lock(info, freeing);
	if (err < 0) {
		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
			       " (type=%d, id=%u)\n", dquot->dq_type,
			       (unsigned)dquot->dq_id);
		goto out;
	}
	if (freeing)
		OCFS2_DQUOT(dquot)->dq_use_count--;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
		err = qtree_release_dquot(&info->dqi_gi, dquot);
		if (info_dirty(sb_dqinfo(sb, type))) {
			err2 = __ocfs2_global_write_info(sb, type);
			if (!err)
				err = err2;
		}
	}
out_qlock:
	ocfs2_qinfo_unlock(info, freeing);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
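/* Called from dquot_scan_active() for each active dquot of the given type:
 * sync it with the global file inside a quota sync transaction and then
 * write out the updated local structure as well. */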
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
		   dquot->dq_type, type, sb->s_id);
	if (type != dquot->dq_type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	dquot_mark_dquot_dirty(dquot);
	status = dquot_commit(dquot);
	if (status < 0)
		mlog_errno(status);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
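
/* Delayed work item: sync all active dquots of this type with the global
 * file and reschedule itself after another dqi_syncjiff jiffies. */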
static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);
}

/*
 * Wrappers for generic quota functions
 */
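/* Write the dquot to the local quota file inside a freshly started
 * transaction. */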
static int ocfs2_write_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	status = dquot_commit(dquot);
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(status);
	return status;
}

int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo;
	int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
				    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };

	if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
		return 0;

	oinfo = sb_dqinfo(sb, type)->dqi_priv;
	/* We modify tree, leaf block, global info, local chunk header,
	 * global and local inode */
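	/* For illustration only: with a (hypothetical) quota tree depth of 2
	 * this works out to 2 + 2 + 1 + 2 * OCFS2_INODE_UPDATE_CREDITS
	 * journal credits. */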
	return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
	       2 * OCFS2_INODE_UPDATE_CREDITS;
}
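
/* Release a dquot whose last user is gone. The global quota file lock is
 * taken because releasing may modify the global tree as well. */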
static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_release(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo;
	int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
				    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
	struct ocfs2_dinode *lfe, *gfe;

	if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
		return 0;

	oinfo = sb_dqinfo(sb, type)->dqi_priv;
	gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
	lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
	/* We can extend local file + global file. In local file we
	 * can modify info, chunk header block and dquot block. In
	 * global file we can modify info, tree and leaf block */
	return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
	       ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
	       3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
}
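
/* Read a dquot in from the global file and take a reference to it, inside
 * its own transaction. */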
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
	/* We need an exclusive lock, because we're going to update use count
	 * and instantiate possibly new dquot structure */
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_acquire(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
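
/* Mark a dquot dirty. If the change came from the administrator setting
 * limits or usage (a DQ_LASTSET_B bit is set) and no transaction is running
 * yet, sync the dquot to the global file immediately so the change reaches
 * other nodes quickly; otherwise only the local structure is written. */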
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
	dquot_mark_dquot_dirty(dquot);

	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dq_data_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dq_data_lock);
	/* This is a slight hack but we can't afford getting global quota
	 * lock if we already have a transaction started. */
	if (!sync || journal_current_handle()) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	/* Now write updated local dquot structure */
	status = dquot_commit(dquot);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	mlog_entry_void();

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
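
/* Allocate / free the ocfs2-specific dquot structure from its slab cache. */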
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
	struct ocfs2_dquot *dquot =
			kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

	if (!dquot)
		return NULL;
	return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

struct dquot_operations ocfs2_quota_operations = {
	.initialize	= dquot_initialize,
	.drop		= dquot_drop,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= ocfs2_write_dquot,
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
};
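
/* Create / destroy the workqueue that runs the periodic quota sync work. */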
int ocfs2_quota_setup(void)
{
	ocfs2_quota_wq = create_workqueue("o2quot");
	if (!ocfs2_quota_wq)
		return -ENOMEM;
	return 0;
}

void ocfs2_quota_shutdown(void)
{
	if (ocfs2_quota_wq) {
		flush_workqueue(ocfs2_quota_wq);
		destroy_workqueue(ocfs2_quota_wq);
		ocfs2_quota_wq = NULL;
	}
}