/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"

static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);
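
/*
 * Translate an on-disk global dquot entry into the in-memory dquot,
 * preserving any fields the administrator has set locally since the
 * last sync (tracked via the DQ_LASTSET_B + QIF_* bits).
 */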
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}
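
/* Fill an on-disk global dquot entry from the in-memory dquot. */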
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(dquot->dq_id);
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
        d->dqb_pad1 = d->dqb_pad2 = 0;
}
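
/* Check whether the on-disk entry at dp belongs to this dquot's id. */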
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;
        return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id = ocfs2_global_is_id,
};
static int ocfs2_validate_quota_block(struct super_block *sb,
                                      struct buffer_head *bh)
{
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

        mlog(0, "Validating quota block %llu\n",
             (unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running. We know any error is
         * local to this block.
         */
        return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}
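
/* Read one logical block of a quota file, validating its checksum trailer. */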
int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
                           struct buffer_head **bh)
{
        int rc = 0;
        struct buffer_head *tmp = *bh;

        if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
                ocfs2_error(inode->i_sb,
                            "Quota file %llu is probably corrupted! Requested "
                            "to read block %Lu but file has size only %Lu\n",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            (unsigned long long)v_block,
                            (unsigned long long)i_size_read(inode));
                return -EIO;
        }
        rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
                                    ocfs2_validate_quota_block);
        if (rc)
                mlog_errno(rc);

        /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
        if (!rc && !*bh)
                *bh = tmp;

        return rc;
}
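
/* Read a quota block given directly by its physical block number. */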
int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
                                struct buffer_head **bhp)
{
        int rc;

        *bhp = NULL;
        rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
                               ocfs2_validate_quota_block);
        if (rc)
                mlog_errno(rc);
        return rc;
}
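
/*
 * Map a logical block of the quota file to its physical block and get a
 * buffer_head for it (the block itself is not read from disk here).
 */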
static int ocfs2_get_quota_block(struct inode *inode, int block,
                                 struct buffer_head **bh)
{
        u64 pblock, pcount;
        int err;

        down_read(&OCFS2_I(inode)->ip_alloc_sem);
        err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
        if (err) {
                mlog_errno(err);
                return err;
        }
        *bh = sb_getblk(inode->i_sb, pblock);
        if (!*bh) {
                err = -EIO;
                mlog_errno(err);
        }
        return err;
}
/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        struct buffer_head *bh;
        size_t toread, tocopy;

        if (off > i_size)
                return 0;
        if (off + len > i_size)
                len = i_size - off;
        toread = len;
        while (toread > 0) {
                tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
                bh = NULL;
                err = ocfs2_read_quota_block(gqinode, blk, &bh);
                if (err) {
                        mlog_errno(err);
                        return err;
                }
                memcpy(data, bh->b_data + offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0, ja_type;
        struct buffer_head *bh = NULL;
        handle_t *handle = journal_current_handle();

        if (!handle) {
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                WARN_ON(1);
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
        }

        mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
        if (gqinode->i_size < off + len) {
                loff_t rounded_end =
                                ocfs2_align_bytes_to_blocks(sb, off + len);

                /* Space is already allocated in ocfs2_global_read_dquot() */
                err = ocfs2_simple_size_update(gqinode,
                                               oinfo->dqi_gqi_bh,
                                               rounded_end);
                if (err < 0)
                        goto out;
                new = 1;
        }
        /* Not rewriting whole block? */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
            !new) {
                err = ocfs2_read_quota_block(gqinode, blk, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
        } else {
                err = ocfs2_get_quota_block(gqinode, blk, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
        }
        if (err) {
                mlog_errno(err);
                goto out;
        }
        lock_buffer(bh);
        if (new)
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
        err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
                                      ja_type);
        if (err < 0) {
                brelse(bh);
                goto out;
        }
        ocfs2_journal_dirty(handle, bh);
        brelse(bh);
out:
        if (err) {
                mutex_unlock(&gqinode->i_mutex);
                mlog_errno(err);
                return err;
        }
        gqinode->i_version++;
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        mutex_unlock(&gqinode->i_mutex);
        return len;
}
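
/*
 * Take the cluster lock on the global quota file inode and cache its
 * buffer_head so that nested lockers reuse the same bh.
 */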
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        int status;
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        if (status < 0)
                return status;
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
        else
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
        return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
}
/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
        struct inode *gqinode = NULL;
        unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                        GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        int status;

        mlog_entry_void();

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                                              OCFS2_INVALID_SLOT);
        if (!gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                     type);
                status = -EINVAL;
                goto out_err;
        }
        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }
        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_unlock_global_qf(oinfo, 0);
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
                     status);
                if (status >= 0)
                        status = -EIO;
                mlog_errno(status);
                goto out_err;
        }
        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                      OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           msecs_to_jiffies(oinfo->dqi_syncms));

out_err:
        mlog_exit(status);
        return status;
}
/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;
        ssize_t size;

        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
                if (size >= 0)
                        size = -EIO;
                return size;
        }
        return 0;
}
int ocfs2_global_write_info(struct super_block *sb, int type)
{
        int err;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 1);
        if (err < 0)
                return err;
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
        return err;
}

static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We may need to allocate tree blocks and a leaf block but not the
         * root block
         */
        return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
        /* We modify all the allocated blocks, tree root, and info block */
        return (ocfs2_global_qinit_alloc(sb, type) + 2) *
               OCFS2_QUOTA_BLOCK_UPDATE_CREDITS;
}
/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
        int err, err2, ex = 0;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct inode *gqinode = info->dqi_gqinode;
        int need_alloc = ocfs2_global_qinit_alloc(sb, type);
        handle_t *handle = NULL;

        err = ocfs2_qinfo_lock(info, 0);
        if (err < 0)
                goto out;
        err = qtree_read_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        ocfs2_qinfo_unlock(info, 0);

        if (!dquot->dq_off) {   /* No real quota entry? */
                ex = 1;
                /*
                 * Add blocks to quota file before we start a transaction since
                 * locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
                down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                err = ocfs2_extend_no_holes(gqinode,
                        gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
                        gqinode->i_size);
                up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
                if (err < 0)
                        goto out;
        }

        handle = ocfs2_start_trans(osb,
                                   ocfs2_calc_global_qinit_credits(sb, type));
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                goto out;
        }
        err = ocfs2_qinfo_lock(info, ex);
        if (err < 0)
                goto out_trans;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
                err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
                if (!err)
                        err = err2;
        }
out_qlock:
        if (ex)
                ocfs2_qinfo_unlock(info, 1);
        else
                ocfs2_qinfo_unlock(info, 0);
out_trans:
        if (handle)
                ocfs2_commit_trans(osb, handle);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}
/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
        time_t olditime, oldbtime;

        err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
                                   sizeof(struct ocfs2_global_disk_dqblk),
                                   dquot->dq_off);
        if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
                if (err >= 0) {
                        mlog(ML_ERROR, "Short read from global quota file "
                                       "(%u read)\n", err);
                        err = -EIO;
                }
                goto out;
        }
        /* Update space and inode usage. Get also other information from
         * global quota file so that we don't overwrite any changes there. */
        spin_lock(&dq_data_lock);
        spacechange = dquot->dq_dqb.dqb_curspace -
                                        OCFS2_DQUOT(dquot)->dq_origspace;
        inodechange = dquot->dq_dqb.dqb_curinodes -
                                        OCFS2_DQUOT(dquot)->dq_originodes;
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
        mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
             dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
             dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curspace += spacechange;
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curinodes += inodechange;
        /* Set properly space grace time... */
        if (dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
                    oldbtime > 0) {
                        if (dquot->dq_dqb.dqb_btime > 0)
                                dquot->dq_dqb.dqb_btime =
                                        min(dquot->dq_dqb.dqb_btime, oldbtime);
                        else
                                dquot->dq_dqb.dqb_btime = oldbtime;
                }
        } else {
                dquot->dq_dqb.dqb_btime = 0;
                clear_bit(DQ_BLKS_B, &dquot->dq_flags);
        }
        /* Set properly inode grace time... */
        if (dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
                    olditime > 0) {
                        if (dquot->dq_dqb.dqb_itime > 0)
                                dquot->dq_dqb.dqb_itime =
                                        min(dquot->dq_dqb.dqb_itime, olditime);
                        else
                                dquot->dq_dqb.dqb_itime = olditime;
                }
        } else {
                dquot->dq_dqb.dqb_itime = 0;
                clear_bit(DQ_INODES_B, &dquot->dq_flags);
        }
        /* All information is properly updated, clear the flags */
        __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        spin_unlock(&dq_data_lock);
        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
                               " (type=%d, id=%u)\n", dquot->dq_type,
                               (unsigned)dquot->dq_id);
                goto out;
        }
        if (freeing)
                OCFS2_DQUOT(dquot)->dq_use_count--;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
                err = qtree_release_dquot(&info->dqi_gi, dquot);
                if (info_dirty(sb_dqinfo(sb, type))) {
                        err2 = __ocfs2_global_write_info(sb, type);
                        if (!err)
                                err = err2;
                }
        }
out_qlock:
        ocfs2_qinfo_unlock(info, freeing);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}
/*
 * Functions for periodic syncing of dquots with global file
 */
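
/*
 * Sync a single dquot with the global quota file; called from
 * dquot_scan_active() for each active dquot of the given type.
 */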
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
        handle_t *handle;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;

        mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
                   dquot->dq_type, type, sb->s_id);
        if (type != dquot->dq_type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        if (status < 0)
                mlog_errno(status);
        /* We have to write local structure as well... */
        dquot_mark_dquot_dirty(dquot);
        status = dquot_commit(dquot);
        if (status < 0)
                mlog_errno(status);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}
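
/*
 * Periodic work item: push all active dquots of this type to the global
 * file and re-arm itself after dqi_syncms milliseconds.
 */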
static void qsync_work_fn(struct work_struct *work)
{
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
        queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
                           msecs_to_jiffies(oinfo->dqi_syncms));
}
/*
 * Wrappers for generic quota functions
 */
static int ocfs2_write_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        status = dquot_commit(dquot);
        ocfs2_commit_trans(osb, handle);
out:
        mlog_exit(status);
        return status;
}
static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        /*
         * We modify tree, leaf block, global info, local chunk header,
         * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
         * accounts for inode update
         */
        return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
               OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
               OCFS2_QINFO_WRITE_CREDITS +
               OCFS2_INODE_UPDATE_CREDITS;
}
static int ocfs2_release_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_release(dquot);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
        /* We need an exclusive lock, because we're going to update use count
         * and instantiate possibly new dquot structure */
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        status = dquot_acquire(dquot);
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, type);
        dquot_mark_dquot_dirty(dquot);

        /* In case user set some limits, sync dquot immediately to global
         * quota file so that information propagates quicker */
        spin_lock(&dq_data_lock);
        if (dquot->dq_flags & mask)
                sync = 1;
        spin_unlock(&dq_data_lock);
        /* This is a slight hack but we can't afford getting global quota
         * lock if we already have a transaction started. */
        if (!sync || journal_current_handle()) {
                status = ocfs2_write_dquot(dquot);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = ocfs2_sync_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_trans;
        }
        /* Now write updated local dquot structure */
        status = dquot_commit(dquot);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}
/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        mlog_entry_void();

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}
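
/*
 * Allocate / free the ocfs2-specific dquot structure that embeds the
 * generic struct dquot.
 */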
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
        struct ocfs2_dquot *dquot =
                        kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        if (!dquot)
                return NULL;
        return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
}
const struct dquot_operations ocfs2_quota_operations = {
        .write_dquot    = ocfs2_write_dquot,
        .acquire_dquot  = ocfs2_acquire_dquot,
        .release_dquot  = ocfs2_release_dquot,
        .mark_dirty     = ocfs2_mark_dquot_dirty,
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
};
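
/* Create / destroy the workqueue used for periodic quota syncing. */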
int ocfs2_quota_setup(void)
{
        ocfs2_quota_wq = create_workqueue("o2quot");
        if (!ocfs2_quota_wq)
                return -ENOMEM;
        return 0;
}

void ocfs2_quota_shutdown(void)
{
        if (ocfs2_quota_wq) {
                flush_workqueue(ocfs2_quota_wq);
                destroy_workqueue(ocfs2_quota_wq);
                ocfs2_quota_wq = NULL;
        }
}