quota_global.c

/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"
#include "ocfs2_trace.h"
/*
 * Locking of quotas with OCFS2 is rather complex. Here are rules that
 * should be obeyed by all the functions:
 * - any write of quota structure (either to local or global file) is
 *   protected by dqio_mutex or dquot->dq_lock.
 * - any modification of global quota file holds inode cluster lock, i_mutex,
 *   and ip_alloc_sem of the global quota file (achieved by
 *   ocfs2_lock_global_qf). It also has to hold qinfo_lock.
 * - an allocation of new blocks for local quota file is protected by
 *   its ip_alloc_sem
 *
 * A rough sketch of locking dependencies (lf = local file, gf = global file):
 * Normal filesystem operation:
 *   start_trans -> dqio_mutex -> write to lf
 * Syncing of local and global file:
 *   ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
 *     write to gf
 *     -> write to lf
 * Acquire dquot for the first time:
 *   dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
 *           -> alloc space for gf
 *           -> start_trans -> qinfo_lock -> write to gf
 *           -> ip_alloc_sem of lf -> alloc space for lf
 *           -> write to lf
 * Release last reference to dquot:
 *   dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
 *           -> write to lf
 * Note that all the above operations also hold the inode cluster lock of lf.
 * Recovery:
 *   inode cluster lock of recovered lf
 *     -> read bitmaps -> ip_alloc_sem of lf
 *     -> ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
 *        write to gf
 */
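
/*
 * As a concrete instance of the "Syncing of local and global file" ordering
 * above, ocfs2_sync_dquot_helper() below takes ocfs2_lock_global_qf(), then
 * starts a transaction, then takes dqio_mutex, and only then (by the time it
 * reaches __ocfs2_sync_dquot()) acquires qinfo_lock before writing the
 * global and local files.
 */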
static void qsync_work_fn(struct work_struct *work);

static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
        d->dqb_pad1 = d->dqb_pad2 = 0;
}
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;

        return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
                                le32_to_cpu(d->dqb_id)),
                      dquot->dq_id);
}

struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id = ocfs2_global_is_id,
};
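
/*
 * These three callbacks plug OCFS2's on-disk dquot format into the generic
 * quota tree code: they are installed as oinfo->dqi_gi.dqi_ops in
 * ocfs2_global_read_info() below, and the generic qtree_read_dquot() /
 * qtree_write_dquot() helpers call them to convert between struct mem_dqblk
 * and struct ocfs2_global_disk_dqblk and to match tree entries by id.
 */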
int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
{
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

        trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running. We know any error is
         * local to this block.
         */
        return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}

int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
                                struct buffer_head **bhp)
{
        int rc;

        *bhp = NULL;
        rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
                               ocfs2_validate_quota_block);
        if (rc)
                mlog_errno(rc);
        return rc;
}
/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        struct buffer_head *bh;
        size_t toread, tocopy;
        u64 pblock = 0, pcount = 0;

        if (off > i_size)
                return 0;
        if (off + len > i_size)
                len = i_size - off;
        toread = len;
        while (toread > 0) {
                tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
                if (!pcount) {
                        err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
                                                          &pcount, NULL);
                        if (err) {
                                mlog_errno(err);
                                return err;
                        }
                } else {
                        pcount--;
                        pblock++;
                }
                bh = NULL;
                err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
                if (err) {
                        mlog_errno(err);
                        return err;
                }
                memcpy(data, bh->b_data + offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}
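
/*
 * Note on the read loop above: ocfs2_extent_map_get_blocks() returns, in
 * pcount, the number of contiguous physical blocks starting at pblock, so
 * later iterations can simply advance pblock and consume pcount instead of
 * doing another extent map lookup for every logical block.
 */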
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0, ja_type;
        struct buffer_head *bh = NULL;
        handle_t *handle = journal_current_handle();
        u64 pblock, pcount;

        if (!handle) {
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                WARN_ON(1);
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
        }

        if (gqinode->i_size < off + len) {
                loff_t rounded_end =
                                ocfs2_align_bytes_to_blocks(sb, off + len);

                /* Space is already allocated in ocfs2_acquire_dquot() */
                err = ocfs2_simple_size_update(gqinode,
                                               oinfo->dqi_gqi_bh,
                                               rounded_end);
                if (err < 0)
                        goto out;
                new = 1;
        }
        err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
        if (err) {
                mlog_errno(err);
                goto out;
        }
        /* Not rewriting whole block? */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
            !new) {
                err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
        } else {
                bh = sb_getblk(sb, pblock);
                if (!bh)
                        err = -ENOMEM;
                ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
        }
        if (err) {
                mlog_errno(err);
                goto out;
        }
        lock_buffer(bh);
        if (new)
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
        err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
                                      ja_type);
        if (err < 0) {
                brelse(bh);
                goto out;
        }
        ocfs2_journal_dirty(handle, bh);
        brelse(bh);
out:
        if (err) {
                mlog_errno(err);
                return err;
        }
        gqinode->i_version++;
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        return len;
}
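
/*
 * OCFS2_QBLK_RESERVED_SPACE above is the tail of every quota block reserved
 * for struct ocfs2_disk_dqtrailer (the per-block checksum that
 * ocfs2_validate_quota_block() verifies). That is why ocfs2_quota_write()
 * clamps len to sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset, and why
 * the qtree code is told below that only
 * sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE bytes per block are usable.
 */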
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        int status;
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        if (status < 0)
                return status;
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
        else
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
        if (ex) {
                mutex_lock(&oinfo->dqi_gqinode->i_mutex);
                down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        } else {
                down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        }
        return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        if (ex) {
                up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
                mutex_unlock(&oinfo->dqi_gqinode->i_mutex);
        } else {
                up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        }
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
}
/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
        struct inode *gqinode = NULL;
        unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                        GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        u64 pcount;
        int status;

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                                              OCFS2_INVALID_SLOT);
        if (!gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                     type);
                status = -EINVAL;
                goto out_err;
        }
        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }

        status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
                                             &pcount, NULL);
        if (status < 0)
                goto out_unlock;

        status = ocfs2_qinfo_lock(oinfo, 0);
        if (status < 0)
                goto out_unlock;
        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_qinfo_unlock(oinfo, 0);
        ocfs2_unlock_global_qf(oinfo, 0);
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
                     status);
                if (status >= 0)
                        status = -EIO;
                mlog_errno(status);
                goto out_err;
        }

        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                        OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        schedule_delayed_work(&oinfo->dqi_sync_work,
                              msecs_to_jiffies(oinfo->dqi_syncms));

out_err:
        return status;
out_unlock:
        ocfs2_unlock_global_qf(oinfo, 0);
        mlog_errno(status);
        goto out_err;
}
/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;
        ssize_t size;

        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
                if (size >= 0)
                        size = -EIO;
                return size;
        }
        return 0;
}
int ocfs2_global_write_info(struct super_block *sb, int type)
{
        int err;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 1);
        if (err < 0)
                return err;
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
        return err;
}
static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We may need to allocate tree blocks and a leaf block but not the
         * root block
         */
        return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
        /* We modify all the allocated blocks, tree root, info block and
         * the inode */
        return (ocfs2_global_qinit_alloc(sb, type) + 2) *
                        OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
}
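
/*
 * Worked example of the credit arithmetic above (purely illustrative): with a
 * quota tree depth of 3, ocfs2_global_qinit_alloc() returns 3 (roughly, the
 * non-root index blocks plus a leaf that may have to be newly allocated), so
 * ocfs2_calc_global_qinit_credits() reserves
 * (3 + 2) * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1 credits: the three possibly
 * new blocks, the tree root, the global info block, and one credit for the
 * global quota inode itself.
 */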
/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
        time_t olditime, oldbtime;

        err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
                                   sizeof(struct ocfs2_global_disk_dqblk),
                                   dquot->dq_off);
        if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
                if (err >= 0) {
                        mlog(ML_ERROR, "Short read from global quota file "
                                       "(%u read)\n", err);
                        err = -EIO;
                }
                goto out;
        }

        /* Update space and inode usage. Also get other information from
         * the global quota file so that we don't overwrite any changes
         * made there. */
        spin_lock(&dq_data_lock);
        spacechange = dquot->dq_dqb.dqb_curspace -
                                        OCFS2_DQUOT(dquot)->dq_origspace;
        inodechange = dquot->dq_dqb.dqb_curinodes -
                                        OCFS2_DQUOT(dquot)->dq_originodes;
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
        trace_ocfs2_sync_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                               dquot->dq_dqb.dqb_curspace,
                               (long long)spacechange,
                               dquot->dq_dqb.dqb_curinodes,
                               (long long)inodechange);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curspace += spacechange;
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curinodes += inodechange;
        /* Set properly space grace time... */
        if (dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
                    oldbtime > 0) {
                        if (dquot->dq_dqb.dqb_btime > 0)
                                dquot->dq_dqb.dqb_btime =
                                        min(dquot->dq_dqb.dqb_btime, oldbtime);
                        else
                                dquot->dq_dqb.dqb_btime = oldbtime;
                }
        } else {
                dquot->dq_dqb.dqb_btime = 0;
                clear_bit(DQ_BLKS_B, &dquot->dq_flags);
        }
        /* Set properly inode grace time... */
        if (dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
                    olditime > 0) {
                        if (dquot->dq_dqb.dqb_itime > 0)
                                dquot->dq_dqb.dqb_itime =
                                        min(dquot->dq_dqb.dqb_itime, olditime);
                        else
                                dquot->dq_dqb.dqb_itime = olditime;
                }
        } else {
                dquot->dq_dqb.dqb_itime = 0;
                clear_bit(DQ_INODES_B, &dquot->dq_flags);
        }
        /* All information is properly updated, clear the flags */
        __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        spin_unlock(&dq_data_lock);
        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
                               " (type=%d, id=%u)\n", dquot->dq_id.type,
                               (unsigned)from_kqid(&init_user_ns, dquot->dq_id));
                goto out;
        }
        if (freeing)
                OCFS2_DQUOT(dquot)->dq_use_count--;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
                err = qtree_release_dquot(&info->dqi_gi, dquot);
                if (info_dirty(sb_dqinfo(sb, type))) {
                        err2 = __ocfs2_global_write_info(sb, type);
                        if (!err)
                                err = err2;
                }
        }
out_qlock:
        ocfs2_qinfo_unlock(info, freeing);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}
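
/*
 * __ocfs2_sync_dquot() is shared by the periodic sync path and the release
 * path: with freeing == 0 it only merges the local deltas into the global
 * entry, while with freeing == 1 it also drops the cluster-wide use count
 * and, once that count reaches zero, releases the entry from the quota tree.
 * The callers below reach it through ocfs2_sync_dquot() and
 * ocfs2_global_release_dquot(), expected to be thin wrappers declared in
 * quota.h.
 */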
/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
        handle_t *handle;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;

        trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id),
                                      dquot->dq_id.type,
                                      type, sb->s_id);
        if (type != dquot->dq_id.type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        if (status < 0)
                mlog_errno(status);
        /* We have to write local structure as well... */
        status = ocfs2_local_write_dquot(dquot);
        if (status < 0)
                mlog_errno(status);
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        return status;
}
static void qsync_work_fn(struct work_struct *work)
{
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
        schedule_delayed_work(&oinfo->dqi_sync_work,
                              msecs_to_jiffies(oinfo->dqi_syncms));
}
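
/*
 * qsync_work_fn() is the body of the delayed work armed in
 * ocfs2_global_read_info(): every dqi_syncms milliseconds (an interval read
 * from the global info block) it walks the active dquots with
 * dquot_scan_active(), pushing their local changes to the global file via
 * ocfs2_sync_dquot_helper(), and then reschedules itself.
 */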
/*
 * Wrappers for generic quota functions
 */
static int ocfs2_write_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                                dquot->dq_id.type);

        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        mutex_lock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
        status = ocfs2_local_write_dquot(dquot);
        mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
        ocfs2_commit_trans(osb, handle);
out:
        return status;
}
static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We modify tree, leaf block, global info, local chunk header,
         * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
         * accounts for inode update
         */
        return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
                        OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
               OCFS2_QINFO_WRITE_CREDITS +
               OCFS2_INODE_UPDATE_CREDITS;
}
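
/*
 * A rough tally of the sum above, following the comment: dqi_qtree_depth + 2
 * journaled quota blocks (the tree path plus, per the comment, the leaf and
 * the local chunk header) at OCFS2_QUOTA_BLOCK_UPDATE_CREDITS each, plus
 * OCFS2_QINFO_WRITE_CREDITS for the global info (which already includes its
 * inode update), plus OCFS2_INODE_UPDATE_CREDITS for the remaining inode.
 */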
static int ocfs2_release_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        trace_ocfs2_release_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                                  dquot->dq_id.type);

        mutex_lock(&dquot->dq_lock);
        /* Check whether we are not racing with some other dqget() */
        if (atomic_read(&dquot->dq_count) > 1)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }

        status = ocfs2_global_release_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_trans;
        }
        status = ocfs2_local_release_dquot(handle, dquot);
        /*
         * If we fail here, we cannot do much as global structure is
         * already released. So just complain...
         */
        if (status < 0)
                mlog_errno(status);
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mutex_unlock(&dquot->dq_lock);
        if (status)
                mlog_errno(status);
        return status;
}
/*
 * Read global dquot structure from disk or create it if it does
 * not exist. Also update use count of the global structure and
 * create structure in node-local quota file.
 */
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
        int status = 0, err;
        int ex = 0;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = info->dqi_gqinode;
        int need_alloc = ocfs2_global_qinit_alloc(sb, type);
        handle_t *handle;

        trace_ocfs2_acquire_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                                  type);
        mutex_lock(&dquot->dq_lock);
        /*
         * We need an exclusive lock, because we're going to update use count
         * and instantiate possibly new dquot structure
         */
        status = ocfs2_lock_global_qf(info, 1);
        if (status < 0)
                goto out;
        if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
                status = ocfs2_qinfo_lock(info, 0);
                if (status < 0)
                        goto out_dq;
                status = qtree_read_dquot(&info->dqi_gi, dquot);
                ocfs2_qinfo_unlock(info, 0);
                if (status < 0)
                        goto out_dq;
        }
        set_bit(DQ_READ_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        if (!dquot->dq_off) {   /* No real quota entry? */
                ex = 1;
                /*
                 * Add blocks to quota file before we start a transaction since
                 * locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
                status = ocfs2_extend_no_holes(gqinode, NULL,
                        gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
                        gqinode->i_size);
                if (status < 0)
                        goto out_dq;
        }

        handle = ocfs2_start_trans(osb,
                                   ocfs2_calc_global_qinit_credits(sb, type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                goto out_dq;
        }
        status = ocfs2_qinfo_lock(info, ex);
        if (status < 0)
                goto out_trans;
        status = qtree_write_dquot(&info->dqi_gi, dquot);
        if (ex && info_dirty(sb_dqinfo(sb, type))) {
                err = __ocfs2_global_write_info(sb, type);
                if (!status)
                        status = err;
        }
        ocfs2_qinfo_unlock(info, ex);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_dq:
        ocfs2_unlock_global_qf(info, 1);
        if (status < 0)
                goto out;

        status = ocfs2_create_local_dquot(dquot);
        if (status < 0)
                goto out;
        set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
        mutex_unlock(&dquot->dq_lock);
        if (status)
                mlog_errno(status);
        return status;
}
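
/*
 * On the ex = 1 path above (dquot->dq_off == 0, i.e. no entry in the global
 * tree yet), qtree_write_dquot() will allocate a slot and therefore modify
 * the quota info (free block / free entry bookkeeping), which is why the
 * qinfo lock is taken exclusively and why space for up to need_alloc new
 * tree blocks is reserved with ocfs2_extend_no_holes() before the
 * transaction starts.
 */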
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id),
                                     type);

        /* In case user set some limits, sync dquot immediately to global
         * quota file so that information propagates quicker */
        spin_lock(&dq_data_lock);
        if (dquot->dq_flags & mask)
                sync = 1;
        spin_unlock(&dq_data_lock);
        /* This is a slight hack but we can't afford getting global quota
         * lock if we already have a transaction started. */
        if (!sync || journal_current_handle()) {
                status = ocfs2_write_dquot(dquot);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_dlock;
        }
        /* Now write updated local dquot structure */
        status = ocfs2_local_write_dquot(dquot);
out_dlock:
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        if (status)
                mlog_errno(status);
        return status;
}
/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        if (status)
                mlog_errno(status);
        return status;
}
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
        struct ocfs2_dquot *dquot =
                        kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        if (!dquot)
                return NULL;
        return &dquot->dq_dquot;
}
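
/*
 * struct ocfs2_dquot embeds the generic struct dquot as its dq_dquot member
 * alongside the OCFS2-specific fields used above (dq_use_count, dq_origspace,
 * dq_originodes). ocfs2_alloc_dquot() hands the embedded dquot to the quota
 * core, and the OCFS2_DQUOT() helper converts it back to the containing
 * ocfs2_dquot.
 */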
static void ocfs2_destroy_dquot(struct dquot *dquot)
{
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
}
const struct dquot_operations ocfs2_quota_operations = {
        /* We never make dquot dirty so .write_dquot is never called */
        .acquire_dquot  = ocfs2_acquire_dquot,
        .release_dquot  = ocfs2_release_dquot,
        .mark_dirty     = ocfs2_mark_dquot_dirty,
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
};