  1. /*
  2. * Implementation of operations over global quota file
  3. */
  4. #include <linux/spinlock.h>
  5. #include <linux/fs.h>
  6. #include <linux/quota.h>
  7. #include <linux/quotaops.h>
  8. #include <linux/dqblk_qtree.h>
  9. #include <linux/jiffies.h>
  10. #include <linux/writeback.h>
  11. #include <linux/workqueue.h>
  12. #define MLOG_MASK_PREFIX ML_QUOTA
  13. #include <cluster/masklog.h>
  14. #include "ocfs2_fs.h"
  15. #include "ocfs2.h"
  16. #include "alloc.h"
  17. #include "inode.h"
  18. #include "journal.h"
  19. #include "file.h"
  20. #include "sysfile.h"
  21. #include "dlmglue.h"
  22. #include "uptodate.h"
  23. #include "quota.h"
  24. static struct workqueue_struct *ocfs2_quota_wq = NULL;
  25. static void qsync_work_fn(struct work_struct *work);
  26. static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
  27. {
  28. struct ocfs2_global_disk_dqblk *d = dp;
  29. struct mem_dqblk *m = &dquot->dq_dqb;
  30. /* Update from disk only entries not set by the admin */
  31. if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
  32. m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
  33. m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
  34. }
  35. if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
  36. m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
  37. if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
  38. m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
  39. m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
  40. }
  41. if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
  42. m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
  43. if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
  44. m->dqb_btime = le64_to_cpu(d->dqb_btime);
  45. if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
  46. m->dqb_itime = le64_to_cpu(d->dqb_itime);
  47. OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
  48. }
  49. static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
  50. {
  51. struct ocfs2_global_disk_dqblk *d = dp;
  52. struct mem_dqblk *m = &dquot->dq_dqb;
  53. d->dqb_id = cpu_to_le32(dquot->dq_id);
  54. d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
  55. d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
  56. d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
  57. d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
  58. d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
  59. d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
  60. d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
  61. d->dqb_btime = cpu_to_le64(m->dqb_btime);
  62. d->dqb_itime = cpu_to_le64(m->dqb_itime);
  63. }
  64. static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
  65. {
  66. struct ocfs2_global_disk_dqblk *d = dp;
  67. struct ocfs2_mem_dqinfo *oinfo =
  68. sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
  69. if (qtree_entry_unused(&oinfo->dqi_gi, dp))
  70. return 0;
  71. return le32_to_cpu(d->dqb_id) == dquot->dq_id;
  72. }
/* Format operations handed to the generic quota-tree code (dqblk_qtree)
 * so it can convert and identify ocfs2 global quota entries. */
struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};
  78. int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
  79. struct buffer_head **bh)
  80. {
  81. int rc = 0;
  82. struct buffer_head *tmp = *bh;
  83. rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0, NULL);
  84. if (rc)
  85. mlog_errno(rc);
  86. /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
  87. if (!rc && !*bh)
  88. *bh = tmp;
  89. return rc;
  90. }
  91. static int ocfs2_get_quota_block(struct inode *inode, int block,
  92. struct buffer_head **bh)
  93. {
  94. u64 pblock, pcount;
  95. int err;
  96. down_read(&OCFS2_I(inode)->ip_alloc_sem);
  97. err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
  98. up_read(&OCFS2_I(inode)->ip_alloc_sem);
  99. if (err) {
  100. mlog_errno(err);
  101. return err;
  102. }
  103. *bh = sb_getblk(inode->i_sb, pblock);
  104. if (!*bh) {
  105. err = -EIO;
  106. mlog_errno(err);
  107. }
  108. return err;;
  109. }
/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it.
 * Returns the number of bytes read (possibly clamped to i_size) or a
 * negative errno. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);	/* offset inside first block */
	sector_t blk = off >> sb->s_blocksize_bits;	/* first block to read */
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;

	/* Clamp the request to the current file size */
	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		/* Copy at most up to the end of the current block */
		tocopy = min((size_t)(sb->s_blocksize - offset), toread);
		bh = NULL;
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		/* All blocks after the first are copied from offset 0 */
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
  146. /* Write to quotafile (we know the transaction is already started and has
  147. * enough credits) */
  148. ssize_t ocfs2_quota_write(struct super_block *sb, int type,
  149. const char *data, size_t len, loff_t off)
  150. {
  151. struct mem_dqinfo *info = sb_dqinfo(sb, type);
  152. struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
  153. struct inode *gqinode = oinfo->dqi_gqinode;
  154. int offset = off & (sb->s_blocksize - 1);
  155. sector_t blk = off >> sb->s_blocksize_bits;
  156. int err = 0, new = 0, ja_type;
  157. struct buffer_head *bh = NULL;
  158. handle_t *handle = journal_current_handle();
  159. if (!handle) {
  160. mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
  161. "because transaction was not started.\n",
  162. (unsigned long long)off, (unsigned long long)len);
  163. return -EIO;
  164. }
  165. if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
  166. WARN_ON(1);
  167. len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
  168. }
  169. mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
  170. if (gqinode->i_size < off + len) {
  171. down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
  172. err = ocfs2_extend_no_holes(gqinode, off + len, off);
  173. up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
  174. if (err < 0)
  175. goto out;
  176. err = ocfs2_simple_size_update(gqinode,
  177. oinfo->dqi_gqi_bh,
  178. off + len);
  179. if (err < 0)
  180. goto out;
  181. new = 1;
  182. }
  183. /* Not rewriting whole block? */
  184. if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
  185. !new) {
  186. err = ocfs2_read_quota_block(gqinode, blk, &bh);
  187. ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
  188. } else {
  189. err = ocfs2_get_quota_block(gqinode, blk, &bh);
  190. ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
  191. }
  192. if (err) {
  193. mlog_errno(err);
  194. return err;
  195. }
  196. lock_buffer(bh);
  197. if (new)
  198. memset(bh->b_data, 0, sb->s_blocksize);
  199. memcpy(bh->b_data + offset, data, len);
  200. flush_dcache_page(bh->b_page);
  201. set_buffer_uptodate(bh);
  202. unlock_buffer(bh);
  203. ocfs2_set_buffer_uptodate(gqinode, bh);
  204. err = ocfs2_journal_access(handle, gqinode, bh, ja_type);
  205. if (err < 0) {
  206. brelse(bh);
  207. goto out;
  208. }
  209. err = ocfs2_journal_dirty(handle, bh);
  210. brelse(bh);
  211. if (err < 0)
  212. goto out;
  213. out:
  214. if (err) {
  215. mutex_unlock(&gqinode->i_mutex);
  216. mlog_errno(err);
  217. return err;
  218. }
  219. gqinode->i_version++;
  220. ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
  221. mutex_unlock(&gqinode->i_mutex);
  222. return len;
  223. }
/* Take the cluster lock protecting the global quota file of @oinfo
 * (@ex != 0 for an exclusive lock).  The first locker caches the inode
 * buffer head in dqi_gqi_bh; dqi_gqi_count tracks lock nesting and
 * every nested locker must see the same bh. */
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	/* dq_data_lock protects the bh cache and the nesting counter */
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	return 0;
}
/* Drop the cluster lock on the global quota file taken by
 * ocfs2_lock_global_qf(); release our bh reference and clear the
 * cached bh pointer on the outermost unlock. */
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}
  248. /* Read information header from global quota file */
  249. int ocfs2_global_read_info(struct super_block *sb, int type)
  250. {
  251. struct inode *gqinode = NULL;
  252. unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
  253. GROUP_QUOTA_SYSTEM_INODE };
  254. struct ocfs2_global_disk_dqinfo dinfo;
  255. struct mem_dqinfo *info = sb_dqinfo(sb, type);
  256. struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
  257. int status;
  258. mlog_entry_void();
  259. /* Read global header */
  260. gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
  261. OCFS2_INVALID_SLOT);
  262. if (!gqinode) {
  263. mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
  264. type);
  265. status = -EINVAL;
  266. goto out_err;
  267. }
  268. oinfo->dqi_gi.dqi_sb = sb;
  269. oinfo->dqi_gi.dqi_type = type;
  270. ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
  271. oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
  272. oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
  273. oinfo->dqi_gqi_bh = NULL;
  274. oinfo->dqi_gqi_count = 0;
  275. oinfo->dqi_gqinode = gqinode;
  276. status = ocfs2_lock_global_qf(oinfo, 0);
  277. if (status < 0) {
  278. mlog_errno(status);
  279. goto out_err;
  280. }
  281. status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
  282. sizeof(struct ocfs2_global_disk_dqinfo),
  283. OCFS2_GLOBAL_INFO_OFF);
  284. ocfs2_unlock_global_qf(oinfo, 0);
  285. if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
  286. mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
  287. status);
  288. if (status >= 0)
  289. status = -EIO;
  290. mlog_errno(status);
  291. goto out_err;
  292. }
  293. info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
  294. info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
  295. oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
  296. oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
  297. oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
  298. oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
  299. oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
  300. oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
  301. oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
  302. OCFS2_QBLK_RESERVED_SPACE;
  303. oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
  304. INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
  305. queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
  306. oinfo->dqi_syncjiff);
  307. out_err:
  308. mlog_exit(status);
  309. return status;
  310. }
/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info.  Returns 0 or negative errno. */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	/* Snapshot generic info under dq_data_lock and clear the dirty bit,
	 * so a racing mark-dirty will trigger another writeout */
	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	/* ocfs2-private fields are covered by the qinfo lock the caller holds */
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		if (size >= 0)
			size = -EIO;
		return size;
	}
	return 0;
}
  339. int ocfs2_global_write_info(struct super_block *sb, int type)
  340. {
  341. int err;
  342. struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
  343. err = ocfs2_qinfo_lock(info, 1);
  344. if (err < 0)
  345. return err;
  346. err = __ocfs2_global_write_info(sb, type);
  347. ocfs2_qinfo_unlock(info, 1);
  348. return err;
  349. }
/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file.
 * If the dquot has no entry in the global file yet (dq_off == 0) the qinfo
 * lock is upgraded to exclusive before allocating one. */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
	int err, err2, ex = 0;
	struct ocfs2_mem_dqinfo *info =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 0);
	if (err < 0)
		goto out;
	err = qtree_read_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	/* One more local user of this global entry */
	OCFS2_DQUOT(dquot)->dq_use_count++;
	/* Remember the usage we started from so syncs can send deltas */
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	if (!dquot->dq_off) {	/* No real quota entry? */
		/* Upgrade to exclusive lock for allocation */
		err = ocfs2_qinfo_lock(info, 1);
		if (err < 0)
			goto out_qlock;
		ex = 1;
	}
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	/* Allocation may have dirtied the info header; flush it while we
	 * still hold the exclusive lock */
	if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
		err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
		if (!err)
			err = err2;
	}
out_qlock:
	if (ex)
		ocfs2_qinfo_unlock(info, 1);
	ocfs2_qinfo_unlock(info, 0);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}
  388. /* Sync local information about quota modifications with global quota file.
  389. * Caller must have started the transaction and obtained exclusive lock for
  390. * global quota file inode */
  391. int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
  392. {
  393. int err, err2;
  394. struct super_block *sb = dquot->dq_sb;
  395. int type = dquot->dq_type;
  396. struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
  397. struct ocfs2_global_disk_dqblk dqblk;
  398. s64 spacechange, inodechange;
  399. time_t olditime, oldbtime;
  400. err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
  401. sizeof(struct ocfs2_global_disk_dqblk),
  402. dquot->dq_off);
  403. if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
  404. if (err >= 0) {
  405. mlog(ML_ERROR, "Short read from global quota file "
  406. "(%u read)\n", err);
  407. err = -EIO;
  408. }
  409. goto out;
  410. }
  411. /* Update space and inode usage. Get also other information from
  412. * global quota file so that we don't overwrite any changes there.
  413. * We are */
  414. spin_lock(&dq_data_lock);
  415. spacechange = dquot->dq_dqb.dqb_curspace -
  416. OCFS2_DQUOT(dquot)->dq_origspace;
  417. inodechange = dquot->dq_dqb.dqb_curinodes -
  418. OCFS2_DQUOT(dquot)->dq_originodes;
  419. olditime = dquot->dq_dqb.dqb_itime;
  420. oldbtime = dquot->dq_dqb.dqb_btime;
  421. ocfs2_global_disk2memdqb(dquot, &dqblk);
  422. mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
  423. dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
  424. dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
  425. if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
  426. dquot->dq_dqb.dqb_curspace += spacechange;
  427. if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
  428. dquot->dq_dqb.dqb_curinodes += inodechange;
  429. /* Set properly space grace time... */
  430. if (dquot->dq_dqb.dqb_bsoftlimit &&
  431. dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
  432. if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
  433. oldbtime > 0) {
  434. if (dquot->dq_dqb.dqb_btime > 0)
  435. dquot->dq_dqb.dqb_btime =
  436. min(dquot->dq_dqb.dqb_btime, oldbtime);
  437. else
  438. dquot->dq_dqb.dqb_btime = oldbtime;
  439. }
  440. } else {
  441. dquot->dq_dqb.dqb_btime = 0;
  442. clear_bit(DQ_BLKS_B, &dquot->dq_flags);
  443. }
  444. /* Set properly inode grace time... */
  445. if (dquot->dq_dqb.dqb_isoftlimit &&
  446. dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
  447. if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
  448. olditime > 0) {
  449. if (dquot->dq_dqb.dqb_itime > 0)
  450. dquot->dq_dqb.dqb_itime =
  451. min(dquot->dq_dqb.dqb_itime, olditime);
  452. else
  453. dquot->dq_dqb.dqb_itime = olditime;
  454. }
  455. } else {
  456. dquot->dq_dqb.dqb_itime = 0;
  457. clear_bit(DQ_INODES_B, &dquot->dq_flags);
  458. }
  459. /* All information is properly updated, clear the flags */
  460. __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
  461. __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
  462. __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
  463. __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
  464. __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
  465. __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
  466. OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
  467. OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
  468. spin_unlock(&dq_data_lock);
  469. err = ocfs2_qinfo_lock(info, freeing);
  470. if (err < 0) {
  471. mlog(ML_ERROR, "Failed to lock quota info, loosing quota write"
  472. " (type=%d, id=%u)\n", dquot->dq_type,
  473. (unsigned)dquot->dq_id);
  474. goto out;
  475. }
  476. if (freeing)
  477. OCFS2_DQUOT(dquot)->dq_use_count--;
  478. err = qtree_write_dquot(&info->dqi_gi, dquot);
  479. if (err < 0)
  480. goto out_qlock;
  481. if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
  482. err = qtree_release_dquot(&info->dqi_gi, dquot);
  483. if (info_dirty(sb_dqinfo(sb, type))) {
  484. err2 = __ocfs2_global_write_info(sb, type);
  485. if (!err)
  486. err = err2;
  487. }
  488. }
  489. out_qlock:
  490. ocfs2_qinfo_unlock(info, freeing);
  491. out:
  492. if (err < 0)
  493. mlog_errno(err);
  494. return err;
  495. }
/*
 * Functions for periodic syncing of dquots with global file
 */
/* dquot_scan_active() callback: sync one active dquot of quota type @type
 * to the global file and then rewrite the local structure.  Dquots of a
 * different type are skipped. */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
		   dquot->dq_type, type, sb->s_id);
	if (type != dquot->dq_type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	/* dqio_mutex serializes quota file I/O with the generic quota code */
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	dquot_mark_dquot_dirty(dquot);
	status = dquot_commit(dquot);
	if (status < 0)
		mlog_errno(status);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
/* Delayed-work handler: push all active dquots of this quota type to the
 * global file, then re-arm itself with the configured sync interval. */
static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);
}
  546. /*
  547. * Wrappers for generic quota functions
  548. */
  549. static int ocfs2_write_dquot(struct dquot *dquot)
  550. {
  551. handle_t *handle;
  552. struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
  553. int status = 0;
  554. mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
  555. handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
  556. if (IS_ERR(handle)) {
  557. status = PTR_ERR(handle);
  558. mlog_errno(status);
  559. goto out;
  560. }
  561. status = dquot_commit(dquot);
  562. ocfs2_commit_trans(osb, handle);
  563. out:
  564. mlog_exit(status);
  565. return status;
  566. }
  567. int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
  568. {
  569. struct ocfs2_mem_dqinfo *oinfo;
  570. int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
  571. OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
  572. if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
  573. return 0;
  574. oinfo = sb_dqinfo(sb, type)->dqi_priv;
  575. /* We modify tree, leaf block, global info, local chunk header,
  576. * global and local inode */
  577. return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
  578. 2 * OCFS2_INODE_UPDATE_CREDITS;
  579. }
/* Release the last local reference to a dquot: take the global quota file
 * lock exclusively, start a transaction with deletion credits and let the
 * generic code release the structure. */
static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_release(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
  606. int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
  607. {
  608. struct ocfs2_mem_dqinfo *oinfo;
  609. int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
  610. OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
  611. struct ocfs2_dinode *lfe, *gfe;
  612. if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
  613. return 0;
  614. oinfo = sb_dqinfo(sb, type)->dqi_priv;
  615. gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
  616. lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
  617. /* We can extend local file + global file. In local file we
  618. * can modify info, chunk header block and dquot block. In
  619. * global file we can modify info, tree and leaf block */
  620. return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
  621. ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
  622. 3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
  623. }
/* Acquire a reference to a dquot, possibly creating its entry in the
 * global quota file. */
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
	/* We need an exclusive lock, because we're going to update use count
	 * and instantiate possibly new dquot structure */
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_acquire(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
/* Mark a dquot dirty and write it out.  If the admin just changed limits
 * or usage (any DQ_LASTSET_B flag set), sync straight to the global file
 * so the change propagates cluster-wide quickly; otherwise only the local
 * quota file is written. */
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	/* Mask of "admin changed this field" flags that force a global sync */
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
	dquot_mark_dquot_dirty(dquot);

	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dq_data_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dq_data_lock);
	if (!sync) {
		/* No admin changes: a local write is enough */
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	/* Now write updated local dquot structure */
	status = dquot_commit(dquot);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}
  703. /* This should happen only after set_dqinfo(). */
  704. static int ocfs2_write_info(struct super_block *sb, int type)
  705. {
  706. handle_t *handle;
  707. int status = 0;
  708. struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
  709. mlog_entry_void();
  710. status = ocfs2_lock_global_qf(oinfo, 1);
  711. if (status < 0)
  712. goto out;
  713. handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
  714. if (IS_ERR(handle)) {
  715. status = PTR_ERR(handle);
  716. mlog_errno(status);
  717. goto out_ilock;
  718. }
  719. status = dquot_commit_info(sb, type);
  720. ocfs2_commit_trans(OCFS2_SB(sb), handle);
  721. out_ilock:
  722. ocfs2_unlock_global_qf(oinfo, 1);
  723. out:
  724. mlog_exit(status);
  725. return status;
  726. }
/* This is difficult. We have to lock quota inode and start transaction
 * in this function but we don't want to take the penalty of exlusive
 * quota file lock when we are just going to use cached structures. So
 * we just take read lock check whether we have dquot cached and if so,
 * we don't have to take the write lock... */
static int ocfs2_dquot_initialize(struct inode *inode, int type)
{
	handle_t *handle = NULL;
	int status = 0;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;
	int exclusive = 0;
	int cnt;
	qid_t id;

	mlog_entry_void();

	/* @type == -1 means initialize all enabled quota types */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 0);
		if (status < 0)
			goto out;
		/* This is just a performance optimization not a reliable test.
		 * Since we hold an inode lock, noone can actually release
		 * the structure until we are finished with initialization. */
		if (inode->i_dquot[cnt] != NODQUOT) {
			ocfs2_unlock_global_qf(oinfo, 0);
			continue;
		}
		/* When we have inode lock, we know that no dquot_release() can
		 * run and thus we can safely check whether we need to
		 * read+modify global file to get quota information or whether
		 * our node already has it. */
		if (cnt == USRQUOTA)
			id = inode->i_uid;
		else if (cnt == GRPQUOTA)
			id = inode->i_gid;
		else
			BUG();
		/* Obtain exclusion from quota off... */
		down_write(&sb_dqopt(sb)->dqptr_sem);
		exclusive = !dquot_is_cached(sb, id, cnt);
		up_write(&sb_dqopt(sb)->dqptr_sem);
		if (exclusive) {
			/* Not cached: dquot_initialize() will have to read
			 * and update the global file, so take the exclusive
			 * lock and open a transaction for it */
			status = ocfs2_lock_global_qf(oinfo, 1);
			if (status < 0) {
				exclusive = 0;
				mlog_errno(status);
				goto out_ilock;
			}
			handle = ocfs2_start_trans(OCFS2_SB(sb),
					ocfs2_calc_qinit_credits(sb, cnt));
			if (IS_ERR(handle)) {
				status = PTR_ERR(handle);
				mlog_errno(status);
				goto out_ilock;
			}
		}
		dquot_initialize(inode, cnt);
		if (exclusive) {
			ocfs2_commit_trans(OCFS2_SB(sb), handle);
			ocfs2_unlock_global_qf(oinfo, 1);
		}
		ocfs2_unlock_global_qf(oinfo, 0);
	}
	mlog_exit(0);
	return 0;
out_ilock:
	if (exclusive)
		ocfs2_unlock_global_qf(oinfo, 1);
	ocfs2_unlock_global_qf(oinfo, 0);
out:
	mlog_exit(status);
	return status;
}
/* Slow path of dropping an inode's dquot references: we may be releasing
 * the last local reference, which can require writing the dquot back to
 * the global quota file. Take the exclusive global quota file locks and a
 * transaction up front, then let dquot_drop() do the work. */
static int ocfs2_dquot_drop_slow(struct inode *inode)
{
	int status = 0;
	int cnt;
	/* Tracks which global quota file locks were actually obtained so the
	 * cleanup loop only releases those. */
	int got_lock[MAXQUOTAS] = {0, 0};
	handle_t *handle;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		/* Exclusive (write) lock - we may modify the global file */
		status = ocfs2_lock_global_qf(oinfo, 1);
		if (status < 0)
			goto out;
		got_lock[cnt] = 1;
	}
	/* Reserve credits for both quota types since dquot_drop() may touch
	 * either quota file. */
	handle = ocfs2_start_trans(OCFS2_SB(sb),
			ocfs2_calc_qinit_credits(sb, USRQUOTA) +
			ocfs2_calc_qinit_credits(sb, GRPQUOTA));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	dquot_drop(inode);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (got_lock[cnt]) {
			oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
			ocfs2_unlock_global_qf(oinfo, 1);
		}
	return status;
}
/* See the comment before ocfs2_dquot_initialize. Fast path: take only the
 * shared global quota file locks, and fall back to ocfs2_dquot_drop_slow()
 * when we might be releasing the last reference to a dquot. */
static int ocfs2_dquot_drop(struct inode *inode)
{
	int status = 0;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;
	int exclusive = 0;	/* set when the slow (exclusive-lock) path is needed */
	int cnt;
	/* Tracks which shared locks were obtained, for partial cleanup */
	int got_lock[MAXQUOTAS] = {0, 0};

	mlog_entry_void();

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 0);
		if (status < 0)
			goto out;
		got_lock[cnt] = 1;
	}
	/* Lock against anyone releasing references so that when we check
	 * we know we are not going to be the last ones to release the dquot */
	down_write(&sb_dqopt(sb)->dqptr_sem);
	/* Urgh, this is a terrible hack :( */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/* dq_count > 1 means someone else also holds a reference, so
		 * we cannot be the last dropper here - but we cannot prove we
		 * won't become it, so play safe and go the slow way. */
		if (inode->i_dquot[cnt] != NODQUOT &&
		    atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) {
			exclusive = 1;
			break;
		}
	}
	if (!exclusive)
		dquot_drop_locked(inode);
	up_write(&sb_dqopt(sb)->dqptr_sem);
out:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (got_lock[cnt]) {
			oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
			ocfs2_unlock_global_qf(oinfo, 0);
		}
	/* In case we bailed out because we had to do expensive locking
	 * do it now... */
	if (exclusive)
		status = ocfs2_dquot_drop_slow(inode);
	mlog_exit(status);
	return status;
}
  885. static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
  886. {
  887. struct ocfs2_dquot *dquot =
  888. kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);
  889. if (!dquot)
  890. return NULL;
  891. return &dquot->dq_dquot;
  892. }
/* Return a dquot obtained from ocfs2_alloc_dquot() to the slab cache.
 * NOTE(review): the struct dquot pointer is freed directly into the
 * ocfs2_dquot cache - presumably dq_dquot is the first member of
 * struct ocfs2_dquot so the pointers coincide; verify against the
 * structure definition. */
static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}
/* Quota operations vector handed to the generic quota code: ocfs2-specific
 * handlers for cluster-aware init/drop/write/acquire/release, with the
 * generic dquot_* implementations for plain space/inode accounting. */
struct dquot_operations ocfs2_quota_operations = {
	.initialize	= ocfs2_dquot_initialize,
	.drop		= ocfs2_dquot_drop,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= ocfs2_write_dquot,
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
};
  913. int ocfs2_quota_setup(void)
  914. {
  915. ocfs2_quota_wq = create_workqueue("o2quot");
  916. if (!ocfs2_quota_wq)
  917. return -ENOMEM;
  918. return 0;
  919. }
  920. void ocfs2_quota_shutdown(void)
  921. {
  922. if (ocfs2_quota_wq) {
  923. flush_workqueue(ocfs2_quota_wq);
  924. destroy_workqueue(ocfs2_quota_wq);
  925. ocfs2_quota_wq = NULL;
  926. }
  927. }