quota_global.c

/*
 * Implementation of operations over global quota file
 */

#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "quota.h"

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	/* Update from disk only entries not set by the admin */
	if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
		m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
		m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
	if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
		m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
		m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
		m->dqb_btime = le64_to_cpu(d->dqb_btime);
	if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
		m->dqb_itime = le64_to_cpu(d->dqb_itime);
	OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}
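
/* Convert the in-memory dquot into the on-disk (little-endian) format */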
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	d->dqb_id = cpu_to_le32(dquot->dq_id);
	d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
	d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
	d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
	d->dqb_pad1 = d->dqb_pad2 = 0;
}
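
/* Check whether the on-disk entry is in use and carries this dquot's id */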
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	if (qtree_entry_unused(&oinfo->dqi_gi, dp))
		return 0;
	return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};
static int ocfs2_validate_quota_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	struct ocfs2_disk_dqtrailer *dqt =
		ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

	mlog(0, "Validating quota block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running. We know any error is
	 * local to this block.
	 */
	return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}
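
/*
 * Read one virtual block of the quota file, validating its ECC trailer.
 * A request beyond i_size indicates a corrupted quota file and fails
 * with -EIO.
 */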
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
			   struct buffer_head **bh)
{
	int rc = 0;
	struct buffer_head *tmp = *bh;

	if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
		ocfs2_error(inode->i_sb,
			    "Quota file %llu is probably corrupted! Requested "
			    "to read block %Lu but file has size only %Lu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)v_block,
			    (unsigned long long)i_size_read(inode));
		return -EIO;
	}
	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
				    ocfs2_validate_quota_block);
	if (rc)
		mlog_errno(rc);

	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;
	return rc;
}
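
/*
 * Map a logical quota file block to its physical block and get a buffer
 * head for it without reading the data (used when the whole block is
 * about to be overwritten).
 */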
static int ocfs2_get_quota_block(struct inode *inode, int block,
				 struct buffer_head **bh)
{
	u64 pblock, pcount;
	int err;

	down_read(&OCFS2_I(inode)->ip_alloc_sem);
	err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	if (err) {
		mlog_errno(err);
		return err;
	}
	*bh = sb_getblk(inode->i_sb, pblock);
	if (!*bh) {
		err = -EIO;
		mlog_errno(err);
	}
	return err;
}
/* Read data from the global quota file - avoid the page cache and such
 * because we cannot afford to acquire the locks... We use the quota
 * cluster lock to serialize operations. Caller is responsible for
 * acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;

	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
		bh = NULL;
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
			  const char *data, size_t len, loff_t off)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0, new = 0, ja_type;
	struct buffer_head *bh = NULL;
	handle_t *handle = journal_current_handle();

	if (!handle) {
		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
		     "because transaction was not started.\n",
		     (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
		WARN_ON(1);
		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
	}

	mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
	if (gqinode->i_size < off + len) {
		loff_t rounded_end =
				ocfs2_align_bytes_to_blocks(sb, off + len);

		/* Space is already allocated in ocfs2_global_read_dquot() */
		err = ocfs2_simple_size_update(gqinode,
					       oinfo->dqi_gqi_bh,
					       rounded_end);
		if (err < 0)
			goto out;
		new = 1;
	}
	/* Not rewriting whole block? */
	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
	    !new) {
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
	} else {
		err = ocfs2_get_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
	}
	if (err) {
		mlog_errno(err);
		goto out;
	}
	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
	err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
				      ja_type);
	if (err < 0) {
		brelse(bh);
		goto out;
	}
	err = ocfs2_journal_dirty(handle, bh);
	brelse(bh);
	if (err < 0)
		goto out;
out:
	if (err) {
		mutex_unlock(&gqinode->i_mutex);
		mlog_errno(err);
		return err;
	}
	gqinode->i_version++;
	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
	mutex_unlock(&gqinode->i_mutex);
	return len;
}
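
/*
 * Take the cluster lock protecting the global quota file and cache the
 * buffer head of its inode. The reference count lets nested holders share
 * the same buffer head.
 */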
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	return 0;
}
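
/* Drop the cluster lock on the global quota file and forget the cached
 * inode buffer head once the last holder is gone */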
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}
/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	struct inode *gqinode = NULL;
	unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	int status;

	mlog_entry_void();

	/* Read global header */
	gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
					      OCFS2_INVALID_SLOT);
	if (!gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
		     type);
		status = -EINVAL;
		goto out_err;
	}
	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;
	oinfo->dqi_gqinode = gqinode;
	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_unlock_global_qf(oinfo, 0);
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;
		mlog_errno(status);
		goto out_err;
	}
	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
					OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   msecs_to_jiffies(oinfo->dqi_syncms));

out_err:
	mlog_exit(status);
	return status;
}
/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		if (size >= 0)
			size = -EIO;
		return size;
	}
	return 0;
}
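
/* Locked variant: take the quota info lock exclusively, then write the
 * info header */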
int ocfs2_global_write_info(struct super_block *sb, int type)
{
	int err;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 1);
	if (err < 0)
		return err;
	err = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(info, 1);
	return err;
}
static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	/*
	 * We may need to allocate tree blocks and a leaf block but not the
	 * root block
	 */
	return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
	/* We modify all the allocated blocks, tree root, and info block */
	return (ocfs2_global_qinit_alloc(sb, type) + 2) *
			OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
}
/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
	int err, err2, ex = 0;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct inode *gqinode = info->dqi_gqinode;
	int need_alloc = ocfs2_global_qinit_alloc(sb, type);
	handle_t *handle = NULL;

	err = ocfs2_qinfo_lock(info, 0);
	if (err < 0)
		goto out;
	err = qtree_read_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	OCFS2_DQUOT(dquot)->dq_use_count++;
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	ocfs2_qinfo_unlock(info, 0);

	if (!dquot->dq_off) {	/* No real quota entry? */
		ex = 1;
		/*
		 * Add blocks to quota file before we start a transaction since
		 * locking allocators ranks above a transaction start
		 */
		WARN_ON(journal_current_handle());
		down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		err = ocfs2_extend_no_holes(gqinode,
			gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
			gqinode->i_size);
		up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		if (err < 0)
			goto out;
	}

	handle = ocfs2_start_trans(osb,
				   ocfs2_calc_global_qinit_credits(sb, type));
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out;
	}
	err = ocfs2_qinfo_lock(info, ex);
	if (err < 0)
		goto out_trans;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
		err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
		if (!err)
			err = err2;
	}
out_qlock:
	if (ex)
		ocfs2_qinfo_unlock(info, 1);
	else
		ocfs2_qinfo_unlock(info, 0);
out_trans:
	if (handle)
		ocfs2_commit_trans(osb, handle);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}
/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
	int err, err2;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_global_disk_dqblk dqblk;
	s64 spacechange, inodechange;
	time_t olditime, oldbtime;

	err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				   sizeof(struct ocfs2_global_disk_dqblk),
				   dquot->dq_off);
	if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
		if (err >= 0) {
			mlog(ML_ERROR, "Short read from global quota file "
			     "(%u read)\n", err);
			err = -EIO;
		}
		goto out;
	}

	/* Update space and inode usage. Also pick up other information from
	 * the global quota file so that we don't overwrite any changes made
	 * there. */
	spin_lock(&dq_data_lock);
	spacechange = dquot->dq_dqb.dqb_curspace -
					OCFS2_DQUOT(dquot)->dq_origspace;
	inodechange = dquot->dq_dqb.dqb_curinodes -
					OCFS2_DQUOT(dquot)->dq_originodes;
	olditime = dquot->dq_dqb.dqb_itime;
	oldbtime = dquot->dq_dqb.dqb_btime;
	ocfs2_global_disk2memdqb(dquot, &dqblk);
	mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
	     dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
	     dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curspace += spacechange;
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Set space grace time properly... */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
		    oldbtime > 0) {
			if (dquot->dq_dqb.dqb_btime > 0)
				dquot->dq_dqb.dqb_btime =
					min(dquot->dq_dqb.dqb_btime, oldbtime);
			else
				dquot->dq_dqb.dqb_btime = oldbtime;
		}
	} else {
		dquot->dq_dqb.dqb_btime = 0;
		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	}
	/* Set inode grace time properly... */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
		    olditime > 0) {
			if (dquot->dq_dqb.dqb_itime > 0)
				dquot->dq_dqb.dqb_itime =
					min(dquot->dq_dqb.dqb_itime, olditime);
			else
				dquot->dq_dqb.dqb_itime = olditime;
		}
	} else {
		dquot->dq_dqb.dqb_itime = 0;
		clear_bit(DQ_INODES_B, &dquot->dq_flags);
	}
	/* All information is properly updated, clear the flags */
	__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	spin_unlock(&dq_data_lock);
	err = ocfs2_qinfo_lock(info, freeing);
	if (err < 0) {
		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
		     " (type=%d, id=%u)\n", dquot->dq_type,
		     (unsigned)dquot->dq_id);
		goto out;
	}
	if (freeing)
		OCFS2_DQUOT(dquot)->dq_use_count--;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
		err = qtree_release_dquot(&info->dqi_gi, dquot);
		if (info_dirty(sb_dqinfo(sb, type))) {
			err2 = __ocfs2_global_write_info(sb, type);
			if (!err)
				err = err2;
		}
	}
out_qlock:
	ocfs2_qinfo_unlock(info, freeing);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}
/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
		   dquot->dq_type, type, sb->s_id);
	if (type != dquot->dq_type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	dquot_mark_dquot_dirty(dquot);
	status = dquot_commit(dquot);
	if (status < 0)
		mlog_errno(status);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
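
/* Delayed work: sync all active dquots of this type with the global file
 * and re-arm itself after dqi_syncms milliseconds */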
static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   msecs_to_jiffies(oinfo->dqi_syncms));
}
/*
 * Wrappers for generic quota functions
 */
static int ocfs2_write_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	status = dquot_commit(dquot);
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(status);
	return status;
}
static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	/*
	 * We modify tree, leaf block, global info, local chunk header,
	 * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
	 * accounts for inode update
	 */
	return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
	       OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
	       OCFS2_QINFO_WRITE_CREDITS +
	       OCFS2_INODE_UPDATE_CREDITS;
}
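
/* Drop our reference to a dquot in the global file, inside a transaction
 * and under the global quota file cluster lock */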
static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_release(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
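
/* Read a dquot from the global file (allocating an entry if necessary)
 * under an exclusive cluster lock */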
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
	/* We need an exclusive lock, because we're going to update use count
	 * and possibly instantiate a new dquot structure */
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	status = dquot_acquire(dquot);
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
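
/* Mark a dquot dirty. If the admin just changed limits, sync the dquot to
 * the global file right away so the change propagates to other nodes;
 * otherwise only write the local structure. */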
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
	dquot_mark_dquot_dirty(dquot);

	/* In case the user set some limits, sync the dquot immediately to the
	 * global quota file so that the information propagates quicker */
	spin_lock(&dq_data_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dq_data_lock);
	/* This is a slight hack but we can't afford to take the global quota
	 * lock if we already have a transaction started. */
	if (!sync || journal_current_handle()) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	/* Now write updated local dquot structure */
	status = dquot_commit(dquot);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	mlog_entry_void();

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
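
/* Allocate and free the ocfs2-specific dquot structure from its slab
 * cache; the generic layer only ever sees the embedded struct dquot */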
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
	struct ocfs2_dquot *dquot =
			kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

	if (!dquot)
		return NULL;
	return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

const struct dquot_operations ocfs2_quota_operations = {
	.write_dquot	= ocfs2_write_dquot,
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
};
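
/* Create and tear down the workqueue used for periodic quota syncing */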
int ocfs2_quota_setup(void)
{
	ocfs2_quota_wq = create_workqueue("o2quot");
	if (!ocfs2_quota_wq)
		return -ENOMEM;
	return 0;
}

void ocfs2_quota_shutdown(void)
{
	if (ocfs2_quota_wq) {
		flush_workqueue(ocfs2_quota_wq);
		destroy_workqueue(ocfs2_quota_wq);
		ocfs2_quota_wq = NULL;
	}
}