/*
 * quota_global.c
 *
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "quota.h"

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);

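/*
 * Copy an on-disk global dqblk into the in-memory dquot. Fields that the
 * administrator has just set locally (tracked via the DQ_LASTSET_B bits)
 * are left untouched so the disk copy does not clobber them.
 */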
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

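/* Fill an on-disk global dqblk from the in-memory dquot. */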
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(dquot->dq_id);
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
        d->dqb_pad1 = d->dqb_pad2 = 0;
}

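/*
 * Check whether an on-disk entry belongs to the given dquot; unused
 * entries never match.
 */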
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;
        return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id          = ocfs2_global_is_id,
};

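/* Verify the ecc trailer of a quota block that was just read from disk. */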
static int ocfs2_validate_quota_block(struct super_block *sb,
                                      struct buffer_head *bh)
{
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

        mlog(0, "Validating quota block %llu\n",
             (unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running. We know any error is
         * local to this block.
         */
        return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}

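/* Read one virtual block of a quota file, running the ecc validation above. */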
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
                           struct buffer_head **bh)
{
        int rc = 0;
        struct buffer_head *tmp = *bh;

        rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
                                    ocfs2_validate_quota_block);
        if (rc)
                mlog_errno(rc);

        /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
        if (!rc && !*bh)
                *bh = tmp;

        return rc;
}

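/*
 * Map a logical quota file block to its physical location and get a
 * buffer_head for it without reading the block from disk.
 */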
static int ocfs2_get_quota_block(struct inode *inode, int block,
                                 struct buffer_head **bh)
{
        u64 pblock, pcount;
        int err;

        down_read(&OCFS2_I(inode)->ip_alloc_sem);
        err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
        if (err) {
                mlog_errno(err);
                return err;
        }
        *bh = sb_getblk(inode->i_sb, pblock);
        if (!*bh) {
                err = -EIO;
                mlog_errno(err);
        }
        return err;
}

/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        struct buffer_head *bh;
        size_t toread, tocopy;

        if (off > i_size)
                return 0;
        if (off + len > i_size)
                len = i_size - off;
        toread = len;
        while (toread > 0) {
                tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
                bh = NULL;
                err = ocfs2_read_quota_block(gqinode, blk, &bh);
                if (err) {
                        mlog_errno(err);
                        return err;
                }
                memcpy(data, bh->b_data + offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0, ja_type;
        struct buffer_head *bh = NULL;
        handle_t *handle = journal_current_handle();

        if (!handle) {
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                WARN_ON(1);
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
        }

        mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
        if (gqinode->i_size < off + len) {
                loff_t rounded_end =
                                ocfs2_align_bytes_to_blocks(sb, off + len);

                /* Space is already allocated in ocfs2_global_read_dquot() */
                err = ocfs2_simple_size_update(gqinode,
                                               oinfo->dqi_gqi_bh,
                                               rounded_end);
                if (err < 0)
                        goto out;
                new = 1;
        }
        /* Not rewriting whole block? */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
            !new) {
                err = ocfs2_read_quota_block(gqinode, blk, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
        } else {
                err = ocfs2_get_quota_block(gqinode, blk, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
        }
        if (err) {
                mlog_errno(err);
                goto out;
        }
        lock_buffer(bh);
        if (new)
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        ocfs2_set_buffer_uptodate(gqinode, bh);
        err = ocfs2_journal_access_dq(handle, gqinode, bh, ja_type);
        if (err < 0) {
                brelse(bh);
                goto out;
        }
        err = ocfs2_journal_dirty(handle, bh);
        brelse(bh);
        if (err < 0)
                goto out;
out:
        if (err) {
                mutex_unlock(&gqinode->i_mutex);
                mlog_errno(err);
                return err;
        }
        gqinode->i_version++;
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        mutex_unlock(&gqinode->i_mutex);
        return len;
}

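/*
 * Take the cluster lock protecting the global quota file. The inode's
 * buffer head is cached in dqi_gqi_bh and reference counted via
 * dqi_gqi_count so that nested lockers see the same buffer; the matching
 * unlock below drops the count and the reference.
 */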
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        int status;
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        if (status < 0)
                return status;
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
        else
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
        return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
        struct inode *gqinode = NULL;
        unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                        GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        int status;

        mlog_entry_void();

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                                              OCFS2_INVALID_SLOT);
        if (!gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                     type);
                status = -EINVAL;
                goto out_err;
        }
        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;

        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }
        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_unlock_global_qf(oinfo, 0);
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
                     status);
                if (status >= 0)
                        status = -EIO;
                mlog_errno(status);
                goto out_err;
        }
        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                      OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           msecs_to_jiffies(oinfo->dqi_syncms));
out_err:
        mlog_exit(status);
        return status;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;
        ssize_t size;

        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
                if (size >= 0)
                        size = -EIO;
                return size;
        }
        return 0;
}

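/* Same as above, but takes the quota info cluster lock itself. */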
int ocfs2_global_write_info(struct super_block *sb, int type)
{
        int err;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 1);
        if (err < 0)
                return err;
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
        return err;
}

static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We may need to allocate tree blocks and a leaf block but not the
         * root block
         */
        return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
        /* We modify all the allocated blocks, tree root, and info block */
        return (ocfs2_global_qinit_alloc(sb, type) + 2) *
                        OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
}

/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
        int err, err2, ex = 0;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct inode *gqinode = info->dqi_gqinode;
        int need_alloc = ocfs2_global_qinit_alloc(sb, type);
        handle_t *handle = NULL;

        err = ocfs2_qinfo_lock(info, 0);
        if (err < 0)
                goto out;
        err = qtree_read_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        ocfs2_qinfo_unlock(info, 0);

        if (!dquot->dq_off) {   /* No real quota entry? */
                ex = 1;
                /*
                 * Add blocks to quota file before we start a transaction since
                 * locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
                down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                err = ocfs2_extend_no_holes(gqinode,
                        gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
                        gqinode->i_size);
                up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                if (err < 0)
                        goto out;
        }

        handle = ocfs2_start_trans(osb,
                                   ocfs2_calc_global_qinit_credits(sb, type));
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                goto out;
        }
        err = ocfs2_qinfo_lock(info, ex);
        if (err < 0)
                goto out_trans;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
                err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
                if (!err)
                        err = err2;
        }
out_qlock:
        if (ex)
                ocfs2_qinfo_unlock(info, 1);
        else
                ocfs2_qinfo_unlock(info, 0);
out_trans:
        if (handle)
                ocfs2_commit_trans(osb, handle);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
        time_t olditime, oldbtime;

        err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
                                   sizeof(struct ocfs2_global_disk_dqblk),
                                   dquot->dq_off);
        if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
                if (err >= 0) {
                        mlog(ML_ERROR, "Short read from global quota file "
                             "(%u read)\n", err);
                        err = -EIO;
                }
                goto out;
        }

        /* Update space and inode usage. Also pick up the other fields from
         * the global quota file so that we don't overwrite any changes
         * made there. */
        spin_lock(&dq_data_lock);
        spacechange = dquot->dq_dqb.dqb_curspace -
                        OCFS2_DQUOT(dquot)->dq_origspace;
        inodechange = dquot->dq_dqb.dqb_curinodes -
                        OCFS2_DQUOT(dquot)->dq_originodes;
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
        mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
             dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
             dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curspace += spacechange;
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curinodes += inodechange;
        /* Set properly space grace time... */
        if (dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
                    oldbtime > 0) {
                        if (dquot->dq_dqb.dqb_btime > 0)
                                dquot->dq_dqb.dqb_btime =
                                        min(dquot->dq_dqb.dqb_btime, oldbtime);
                        else
                                dquot->dq_dqb.dqb_btime = oldbtime;
                }
        } else {
                dquot->dq_dqb.dqb_btime = 0;
                clear_bit(DQ_BLKS_B, &dquot->dq_flags);
        }
        /* Set properly inode grace time... */
        if (dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
                    olditime > 0) {
                        if (dquot->dq_dqb.dqb_itime > 0)
                                dquot->dq_dqb.dqb_itime =
                                        min(dquot->dq_dqb.dqb_itime, olditime);
                        else
                                dquot->dq_dqb.dqb_itime = olditime;
                }
        } else {
                dquot->dq_dqb.dqb_itime = 0;
                clear_bit(DQ_INODES_B, &dquot->dq_flags);
        }
        /* All information is properly updated, clear the flags */
        __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        spin_unlock(&dq_data_lock);

        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
                     " (type=%d, id=%u)\n", dquot->dq_type,
                     (unsigned)dquot->dq_id);
                goto out;
        }
        if (freeing)
                OCFS2_DQUOT(dquot)->dq_use_count--;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
                err = qtree_release_dquot(&info->dqi_gi, dquot);
                if (info_dirty(sb_dqinfo(sb, type))) {
                        err2 = __ocfs2_global_write_info(sb, type);
                        if (!err)
                                err = err2;
                }
        }
out_qlock:
        ocfs2_qinfo_unlock(info, freeing);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
        handle_t *handle;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;

        mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
                   dquot->dq_type, type, sb->s_id);
        if (type != dquot->dq_type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        if (status < 0)
                mlog_errno(status);
        /* We have to write local structure as well... */
        dquot_mark_dquot_dirty(dquot);
        status = dquot_commit(dquot);
        if (status < 0)
                mlog_errno(status);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

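/*
 * Delayed work syncing all active dquots of a quota type with the global
 * file; it re-queues itself every dqi_syncms milliseconds.
 */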
static void qsync_work_fn(struct work_struct *work)
{
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           msecs_to_jiffies(oinfo->dqi_syncms));
}

/*
 * Wrappers for generic quota functions
 */

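/* Write a dirty dquot within its own journal transaction. */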
static int ocfs2_write_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        status = dquot_commit(dquot);
        ocfs2_commit_trans(osb, handle);
out:
        mlog_exit(status);
        return status;
}

static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We modify tree, leaf block, global info, local chunk header,
         * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
         * accounts for inode update
         */
        return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
               OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
               OCFS2_QINFO_WRITE_CREDITS +
               OCFS2_INODE_UPDATE_CREDITS;
}

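/*
 * Release a dquot: run dquot_release() under the global quota file cluster
 * lock within a transaction sized by ocfs2_calc_qdel_credits().
 */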
static int ocfs2_release_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_release(dquot);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

static int ocfs2_acquire_dquot(struct dquot *dquot)
{
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
        /* We need an exclusive lock, because we're going to update use count
         * and instantiate possibly new dquot structure */
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        status = dquot_acquire(dquot);
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

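/*
 * Mark a dquot dirty. If the administrator just changed limits or usage
 * (any DQ_LASTSET_B bit is set) and no transaction is running, the dquot
 * is synced to the global file immediately so the change propagates to
 * other nodes quickly; otherwise only the local structure is written.
 */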
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, type);
        dquot_mark_dquot_dirty(dquot);

        /* In case user set some limits, sync dquot immediately to global
         * quota file so that information propagates quicker */
        spin_lock(&dq_data_lock);
        if (dquot->dq_flags & mask)
                sync = 1;
        spin_unlock(&dq_data_lock);
        /* This is a slight hack but we can't afford getting global quota
         * lock if we already have a transaction started. */
        if (!sync || journal_current_handle()) {
                status = ocfs2_write_dquot(dquot);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = ocfs2_sync_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_trans;
        }
        /* Now write updated local dquot structure */
        status = dquot_commit(dquot);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        mlog_entry_void();

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

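/* Allocate and free struct ocfs2_dquot from the ocfs2_dquot_cachep slab. */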
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
        struct ocfs2_dquot *dquot =
                        kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        if (!dquot)
                return NULL;
        return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

struct dquot_operations ocfs2_quota_operations = {
        .initialize     = dquot_initialize,
        .drop           = dquot_drop,
        .alloc_space    = dquot_alloc_space,
        .alloc_inode    = dquot_alloc_inode,
        .free_space     = dquot_free_space,
        .free_inode     = dquot_free_inode,
        .transfer       = dquot_transfer,
        .write_dquot    = ocfs2_write_dquot,
        .acquire_dquot  = ocfs2_acquire_dquot,
        .release_dquot  = ocfs2_release_dquot,
        .mark_dirty     = ocfs2_mark_dquot_dirty,
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
};

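/* Create and tear down the workqueue used for periodic quota syncing. */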
int ocfs2_quota_setup(void)
{
        ocfs2_quota_wq = create_workqueue("o2quot");
        if (!ocfs2_quota_wq)
                return -ENOMEM;
        return 0;
}

void ocfs2_quota_shutdown(void)
{
        if (ocfs2_quota_wq) {
                flush_workqueue(ocfs2_quota_wq);
                destroy_workqueue(ocfs2_quota_wq);
                ocfs2_quota_wq = NULL;
        }
}