quota_global.c

/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "quota.h"

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);

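/* Copy usage and limits from the on-disk global dquot entry into the
 * in-memory dquot, skipping any field the administrator has just set
 * locally (tracked via the DQ_LASTSET_B bits). */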
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	/* Update from disk only entries not set by the admin */
	if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
		m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
		m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
	if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
		m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
		m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
		m->dqb_btime = le64_to_cpu(d->dqb_btime);
	if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
		m->dqb_itime = le64_to_cpu(d->dqb_itime);
	OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

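/* Fill the on-disk global dquot entry from the in-memory dquot (used by
 * the generic quota tree code when writing entries). */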
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	d->dqb_id = cpu_to_le32(dquot->dq_id);
	d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
	d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
	d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
	d->dqb_pad1 = d->dqb_pad2 = 0;
}

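/* Check whether the given on-disk entry belongs to the dquot's id
 * (callback for the generic quota tree code when scanning leaf blocks). */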
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	if (qtree_entry_unused(&oinfo->dqi_gi, dp))
		return 0;
	return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};

static int ocfs2_validate_quota_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	struct ocfs2_disk_dqtrailer *dqt =
		ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

	mlog(0, "Validating quota block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running. We know any error is
	 * local to this block.
	 */
	return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}

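/* Read one block of the global quota file by virtual block number,
 * validating its block trailer ECC via ocfs2_validate_quota_block(). */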
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
			   struct buffer_head **bh)
{
	int rc = 0;
	struct buffer_head *tmp = *bh;

	if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
		ocfs2_error(inode->i_sb,
			    "Quota file %llu is probably corrupted! Requested "
			    "to read block %Lu but file has size only %Lu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)v_block,
			    (unsigned long long)i_size_read(inode));
		return -EIO;
	}
	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
				    ocfs2_validate_quota_block);
	if (rc)
		mlog_errno(rc);

	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;
	return rc;
}

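/* Map the given logical block of the quota file to its physical block and
 * get a buffer_head for it without reading from disk (the caller is going
 * to overwrite the whole block). */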
static int ocfs2_get_quota_block(struct inode *inode, int block,
				 struct buffer_head **bh)
{
	u64 pblock, pcount;
	int err;

	down_read(&OCFS2_I(inode)->ip_alloc_sem);
	err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	if (err) {
		mlog_errno(err);
		return err;
	}
	*bh = sb_getblk(inode->i_sb, pblock);
	if (!*bh) {
		err = -EIO;
		mlog_errno(err);
	}
	return err;
}

/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;

	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
		bh = NULL;
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
			  const char *data, size_t len, loff_t off)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0, new = 0, ja_type;
	struct buffer_head *bh = NULL;
	handle_t *handle = journal_current_handle();

	if (!handle) {
		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
		     "because transaction was not started.\n",
		     (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
		WARN_ON(1);
		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
	}

	mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
	if (gqinode->i_size < off + len) {
		loff_t rounded_end =
				ocfs2_align_bytes_to_blocks(sb, off + len);

		/* Space is already allocated in ocfs2_global_read_dquot() */
		err = ocfs2_simple_size_update(gqinode,
					       oinfo->dqi_gqi_bh,
					       rounded_end);
		if (err < 0)
			goto out;
		new = 1;
	}
	/* Not rewriting whole block? */
	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
	    !new) {
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
	} else {
		err = ocfs2_get_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
	}
	if (err) {
		mlog_errno(err);
		goto out;
	}
	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
	err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
				      ja_type);
	if (err < 0) {
		brelse(bh);
		goto out;
	}
	err = ocfs2_journal_dirty(handle, bh);
	brelse(bh);
	if (err < 0)
		goto out;
out:
	if (err) {
		mutex_unlock(&gqinode->i_mutex);
		mlog_errno(err);
		return err;
	}
	gqinode->i_version++;
	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
	mutex_unlock(&gqinode->i_mutex);
	return len;
}

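/* Take the cluster lock protecting the global quota file inode. The
 * buffer_head with the inode block is cached in oinfo so that nested
 * lockers share it; dqi_gqi_count tracks the nesting depth. */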
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	return 0;
}

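/* Drop the cluster lock on the global quota file inode and release its
 * buffer_head reference; the cached pointer is cleared when the last
 * nested locker unlocks. */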
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	struct inode *gqinode = NULL;
	unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	int status;

	mlog_entry_void();

	/* Read global header */
	gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
					      OCFS2_INVALID_SLOT);
	if (!gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
		     type);
		status = -EINVAL;
		goto out_err;
	}
	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;
	oinfo->dqi_gqinode = gqinode;
	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_unlock_global_qf(oinfo, 0);
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;
		mlog_errno(status);
		goto out_err;
	}
	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
					OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   msecs_to_jiffies(oinfo->dqi_syncms));

out_err:
	mlog_exit(status);
	return status;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		if (size >= 0)
			size = -EIO;
		return size;
	}
	return 0;
}

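/* Locked wrapper around __ocfs2_global_write_info(): takes the quota info
 * cluster lock exclusively before writing the header. */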
int ocfs2_global_write_info(struct super_block *sb, int type)
{
	int err;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 1);
	if (err < 0)
		return err;
	err = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(info, 1);
	return err;
}

static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	/*
	 * We may need to allocate tree blocks and a leaf block but not the
	 * root block
	 */
	return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
	/* We modify all the allocated blocks, tree root, and info block */
	return (ocfs2_global_qinit_alloc(sb, type) + 2) *
			OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
}

/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
	int err, err2, ex = 0;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct inode *gqinode = info->dqi_gqinode;
	int need_alloc = ocfs2_global_qinit_alloc(sb, type);
	handle_t *handle = NULL;

	err = ocfs2_qinfo_lock(info, 0);
	if (err < 0)
		goto out;
	err = qtree_read_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	OCFS2_DQUOT(dquot)->dq_use_count++;
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	ocfs2_qinfo_unlock(info, 0);

	if (!dquot->dq_off) {	/* No real quota entry? */
		ex = 1;
		/*
		 * Add blocks to quota file before we start a transaction since
		 * locking allocators ranks above a transaction start
		 */
		WARN_ON(journal_current_handle());
		down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		err = ocfs2_extend_no_holes(gqinode,
			gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
			gqinode->i_size);
		up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		if (err < 0)
			goto out;
	}

	handle = ocfs2_start_trans(osb,
				   ocfs2_calc_global_qinit_credits(sb, type));
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out;
	}
	err = ocfs2_qinfo_lock(info, ex);
	if (err < 0)
		goto out_trans;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
		err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
		if (!err)
			err = err2;
	}
out_qlock:
	if (ex)
		ocfs2_qinfo_unlock(info, 1);
	else
		ocfs2_qinfo_unlock(info, 0);
out_trans:
	if (handle)
		ocfs2_commit_trans(osb, handle);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
	int err, err2;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_global_disk_dqblk dqblk;
	s64 spacechange, inodechange;
	time_t olditime, oldbtime;

	err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				   sizeof(struct ocfs2_global_disk_dqblk),
				   dquot->dq_off);
	if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
		if (err >= 0) {
			mlog(ML_ERROR, "Short read from global quota file "
				       "(%u read)\n", err);
			err = -EIO;
		}
		goto out;
	}

	/* Update space and inode usage. Get also other information from
	 * global quota file so that we don't overwrite any changes there. */
	spin_lock(&dq_data_lock);
	spacechange = dquot->dq_dqb.dqb_curspace -
					OCFS2_DQUOT(dquot)->dq_origspace;
	inodechange = dquot->dq_dqb.dqb_curinodes -
					OCFS2_DQUOT(dquot)->dq_originodes;
	olditime = dquot->dq_dqb.dqb_itime;
	oldbtime = dquot->dq_dqb.dqb_btime;
	ocfs2_global_disk2memdqb(dquot, &dqblk);
	mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
	     dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
	     dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curspace += spacechange;
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Set properly space grace time... */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
		    oldbtime > 0) {
			if (dquot->dq_dqb.dqb_btime > 0)
				dquot->dq_dqb.dqb_btime =
					min(dquot->dq_dqb.dqb_btime, oldbtime);
			else
				dquot->dq_dqb.dqb_btime = oldbtime;
		}
	} else {
		dquot->dq_dqb.dqb_btime = 0;
		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	}
	/* Set properly inode grace time... */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
		    olditime > 0) {
			if (dquot->dq_dqb.dqb_itime > 0)
				dquot->dq_dqb.dqb_itime =
					min(dquot->dq_dqb.dqb_itime, olditime);
			else
				dquot->dq_dqb.dqb_itime = olditime;
		}
	} else {
		dquot->dq_dqb.dqb_itime = 0;
		clear_bit(DQ_INODES_B, &dquot->dq_flags);
	}
	/* All information is properly updated, clear the flags */
	__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	spin_unlock(&dq_data_lock);
	err = ocfs2_qinfo_lock(info, freeing);
	if (err < 0) {
		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
			       " (type=%d, id=%u)\n", dquot->dq_type,
			       (unsigned)dquot->dq_id);
		goto out;
	}
	if (freeing)
		OCFS2_DQUOT(dquot)->dq_use_count--;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
		err = qtree_release_dquot(&info->dqi_gi, dquot);
		if (info_dirty(sb_dqinfo(sb, type))) {
			err2 = __ocfs2_global_write_info(sb, type);
			if (!err)
				err = err2;
		}
	}
out_qlock:
	ocfs2_qinfo_unlock(info, freeing);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
		   dquot->dq_type, type, sb->s_id);
	if (type != dquot->dq_type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	dquot_mark_dquot_dirty(dquot);
	status = dquot_commit(dquot);
	if (status < 0)
		mlog_errno(status);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

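/* Periodic work item: sync all active dquots of this quota type to the
 * global file and re-queue itself after dqi_syncms milliseconds. */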
static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   msecs_to_jiffies(oinfo->dqi_syncms));
}

/*
 * Wrappers for generic quota functions
 */
static int ocfs2_write_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	status = dquot_commit(dquot);
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(status);
	return status;
}

static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	/*
	 * We modify tree, leaf block, global info, local chunk header,
	 * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
	 * accounts for inode update
	 */
	return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
	       OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
	       OCFS2_QINFO_WRITE_CREDITS +
	       OCFS2_INODE_UPDATE_CREDITS;
}

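/* Release a dquot's reference in the global quota file. Takes the global
 * quota file lock and runs dquot_release() inside a transaction. */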
static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_release(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

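/* Acquire a dquot: dquot_acquire() will read the entry from (or create it
 * in) the global quota file, which requires the exclusive global quota
 * file lock. */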
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
	/* We need an exclusive lock, because we're going to update use count
	 * and instantiate possibly new dquot structure */
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	status = dquot_acquire(dquot);
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
	dquot_mark_dquot_dirty(dquot);

	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dq_data_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dq_data_lock);
	/* This is a slight hack but we can't afford getting global quota
	 * lock if we already have a transaction started. */
	if (!sync || journal_current_handle()) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	/* Now write updated local dquot structure */
	status = dquot_commit(dquot);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	mlog_entry_void();

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

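/* Allocate/free ocfs2_dquot structures from the ocfs2_dquot_cachep slab;
 * the generic quota code only sees the embedded struct dquot. */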
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
	struct ocfs2_dquot *dquot =
			kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

	if (!dquot)
		return NULL;
	return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

const struct dquot_operations ocfs2_quota_operations = {
	.initialize	= dquot_initialize,
	.drop		= dquot_drop,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= ocfs2_write_dquot,
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
};

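/* Module-level setup and teardown of the workqueue used for periodic
 * dquot syncing (qsync_work_fn). */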
int ocfs2_quota_setup(void)
{
	ocfs2_quota_wq = create_workqueue("o2quot");
	if (!ocfs2_quota_wq)
		return -ENOMEM;
	return 0;
}

void ocfs2_quota_shutdown(void)
{
	if (ocfs2_quota_wq) {
		flush_workqueue(ocfs2_quota_wq);
		destroy_workqueue(ocfs2_quota_wq);
		ocfs2_quota_wq = NULL;
	}
}