/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so that it need not be constantly read.
 */

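/*
 * Illustrative example (not from the original sources; the numbers are
 * hypothetical): with the default quota_scale of 1 on a cluster with four
 * journals, need_sync() below projects a node's unsynced change onto every
 * journal. If the quota file shows 90 MB used against a 100 MB limit, a
 * local unsynced change of +2.5 MB projects to 90 + 4 * 2.5 = 100 MB, so
 * that node syncs immediately; a larger quota_scale makes the same
 * projection cross the limit even sooner.
 */
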
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "ops_address.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_host {
        u64 qu_limit;
        u64 qu_warn;
        s64 qu_value;
        u32 qu_ll_next;
};

struct gfs2_quota_change_host {
        u64 qc_change;
        u32 qc_flags; /* GFS2_QCF_... */
        u32 qc_id;
};

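/*
 * Layout note (derived from the code below): the quota file interleaves
 * user and group entries by ID, so the struct gfs2_quota for user ID n
 * lives at index 2n and the one for group ID n at index 2n + 1.
 * qd2offset() turns a gfs2_quota_data into that byte offset.
 */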
static u64 qd2offset(struct gfs2_quota_data *qd)
{
        u64 offset;

        offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
        offset *= sizeof(struct gfs2_quota);

        return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
                    struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd;
        int error;

        qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
        if (!qd)
                return -ENOMEM;

        qd->qd_count = 1;
        qd->qd_id = id;
        if (user)
                set_bit(QDF_USER, &qd->qd_flags);
        qd->qd_slot = -1;

        error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
                               &gfs2_quota_glops, CREATE, &qd->qd_gl);
        if (error)
                goto fail;

        error = gfs2_lvb_hold(qd->qd_gl);
        gfs2_glock_put(qd->qd_gl);
        if (error)
                goto fail;

        *qdp = qd;

        return 0;

fail:
        kmem_cache_free(gfs2_quotad_cachep, qd);
        return error;
}

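/*
 * qd_get() looks a quota ID up in sd_quota_list under the spinlock.
 * Allocation cannot happen while the lock is held, so on a miss it drops
 * the lock, allocates via qd_alloc(), and retries the search; if another
 * CPU raced the same entry in meanwhile, the spare allocation is freed.
 */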
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
        int error, found;

        *qdp = NULL;

        for (;;) {
                found = 0;
                spin_lock(&sdp->sd_quota_spin);
                list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                        if (qd->qd_id == id &&
                            !test_bit(QDF_USER, &qd->qd_flags) == !user) {
                                qd->qd_count++;
                                found = 1;
                                break;
                        }
                }

                if (!found)
                        qd = NULL;

                if (!qd && new_qd) {
                        qd = new_qd;
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        new_qd = NULL;
                }

                spin_unlock(&sdp->sd_quota_spin);

                if (qd || !create) {
                        if (new_qd) {
                                gfs2_lvb_unhold(new_qd->qd_gl);
                                kmem_cache_free(gfs2_quotad_cachep, new_qd);
                        }
                        *qdp = qd;
                        return 0;
                }

                error = qd_alloc(sdp, user, id, &new_qd);
                if (error)
                        return error;
        }
}

static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);
        qd->qd_count++;
        spin_unlock(&sdp->sd_quota_spin);
}

static void qd_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);
        if (!--qd->qd_count)
                qd->qd_last_touched = jiffies;
        spin_unlock(&sdp->sd_quota_spin);
}

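/*
 * Each active quota ID occupies one slot in the per-node quota-change
 * file. Free slots are tracked in sd_quota_bitmap, an array of
 * sd_quota_chunks page-sized bitmaps; slot_get() finds the first byte
 * with a clear bit and claims it.
 */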
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        spin_lock(&sdp->sd_quota_spin);

        if (qd->qd_slot_count++) {
                spin_unlock(&sdp->sd_quota_spin);
                return 0;
        }

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];
                        if (byte != 0xFF)
                                goto found;
                }

        goto fail;

found:
        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))
                        break;
        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if (qd->qd_slot >= sdp->sd_quota_slots)
                goto fail;

        sdp->sd_quota_bitmap[c][o] |= 1 << b;

        spin_unlock(&sdp->sd_quota_spin);

        return 0;

fail:
        qd->qd_slot_count--;
        spin_unlock(&sdp->sd_quota_spin);
        return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
        spin_unlock(&sdp->sd_quota_spin);
}

static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
                qd->qd_slot = -1;
        }
        spin_unlock(&sdp->sd_quota_spin);
}

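/*
 * bh_get() reads the quota-change block that holds this ID's slot and
 * keeps the buffer held while qd_bh_count is nonzero; qd_bh_qc then
 * points directly at the on-disk gfs2_quota_change entry so that
 * do_qc() can update it in place.
 */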
static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int block, offset;
        struct buffer_head *bh;
        int error;
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

        mutex_lock(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                mutex_unlock(&sdp->sd_quota_mutex);
                return 0;
        }

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        bh_map.b_size = 1 << ip->i_inode.i_blkbits;
        error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
        if (error)
                goto fail;
        error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
        if (error)
                goto fail;
        error = -EIO;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
                goto fail_brelse;

        qd->qd_bh = bh;
        qd->qd_bh_qc = (struct gfs2_quota_change *)
                (bh->b_data + sizeof(struct gfs2_meta_header) +
                 offset * sizeof(struct gfs2_quota_change));

        mutex_unlock(&sdp->sd_quota_mutex);

        return 0;

fail_brelse:
        brelse(bh);
fail:
        qd->qd_bh_count--;
        mutex_unlock(&sdp->sd_quota_mutex);
        return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {
                brelse(qd->qd_bh);
                qd->qd_bh = NULL;
                qd->qd_bh_qc = NULL;
        }
        mutex_unlock(&sdp->sd_quota_mutex);
}

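/*
 * qd_fish() picks the next quota entry that has local changes not yet
 * written out in the current sync generation, marks it QDF_LOCKED, and
 * takes the references a sync needs. Moving the entry to the tail of
 * the list makes successive scans round-robin across IDs.
 */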
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;
        int error;
        int found = 0;

        *qdp = NULL;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&sdp->sd_quota_spin);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
                    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
                    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
                        continue;

                list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

                set_bit(QDF_LOCKED, &qd->qd_flags);
                gfs2_assert_warn(sdp, qd->qd_count);
                qd->qd_count++;
                qd->qd_change_sync = qd->qd_change;
                gfs2_assert_warn(sdp, qd->qd_slot_count);
                qd->qd_slot_count++;

                found = 1;
                break;
        }

        if (!found)
                qd = NULL;

        spin_unlock(&sdp->sd_quota_spin);

        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
                error = bh_get(qd);
                if (error) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        return error;
                }
        }

        *qdp = qd;

        return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&sdp->sd_quota_spin);

        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                spin_unlock(&sdp->sd_quota_spin);
                return 0;
        }

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

        set_bit(QDF_LOCKED, &qd->qd_flags);
        gfs2_assert_warn(sdp, qd->qd_count);
        qd->qd_count++;
        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, qd->qd_change_sync);
        if (bh_get(qd)) {
                clear_bit(QDF_LOCKED, &qd->qd_flags);
                slot_put(qd);
                qd_put(qd);
                return 0;
        }

        return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
                    struct gfs2_quota_data **qdp)
{
        int error;

        error = qd_get(sdp, user, id, create, qdp);
        if (error)
                return error;

        error = slot_get(*qdp);
        if (error)
                goto fail;

        error = bh_get(*qdp);
        if (error)
                goto fail_slot;

        return 0;

fail_slot:
        slot_put(*qdp);
fail:
        qd_put(*qdp);
        return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

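/*
 * A fully held quota entry stacks three references: the structure itself
 * (qd_get), a slot in the quota-change file (slot_get), and the buffer
 * backing that slot (bh_get). gfs2_quota_hold() below acquires up to
 * four such entries per operation: the inode's uid and gid, plus a
 * different target uid and/or gid when an ownership change is under way.
 */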
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data **qd = al->al_qd;
        int error;

        if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
                return -EIO;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;

        error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;

        if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
                error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
                if (error)
                        goto out;
                al->al_qd_num++;
                qd++;
        }

        if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
                error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
                if (error)
                        goto out;
                al->al_qd_num++;
                qd++;
        }

out:
        if (error)
                gfs2_quota_unhold(ip);
        return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        unsigned int x;

        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < al->al_qd_num; x++) {
                qdsb_put(al->al_qd[x]);
                al->al_qd[x] = NULL;
        }
        al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
        const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
        const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

        if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
            !test_bit(QDF_USER, &qd_b->qd_flags)) {
                if (test_bit(QDF_USER, &qd_a->qd_flags))
                        return -1;
                else
                        return 1;
        }
        if (qd_a->qd_id < qd_b->qd_id)
                return -1;
        if (qd_a->qd_id > qd_b->qd_id)
                return 1;

        return 0;
}

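/*
 * do_qc() applies a quota delta to this node's quota-change file inside
 * the current transaction. The first change away from zero sets
 * QDF_CHANGE and pins the entry and its slot; a change that brings the
 * running total back to zero clears the tag and drops those references.
 */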
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        struct gfs2_quota_change *qc = qd->qd_bh_qc;
        s64 x;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
                qc->qc_change = 0;
                qc->qc_flags = 0;
                if (test_bit(QDF_USER, &qd->qd_flags))
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(qd->qd_id);
        }

        x = be64_to_cpu(qc->qc_change) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&sdp->sd_quota_spin);
        qd->qd_change = x;
        spin_unlock(&sdp->sd_quota_spin);

        if (!x) {
                gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
                clear_bit(QDF_CHANGE, &qd->qd_flags);
                qc->qc_flags = 0;
                qc->qc_id = 0;
                slot_put(qd);
                qd_put(qd);
        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
                qd_hold(qd);
                slot_hold(qd);
        }

        mutex_unlock(&sdp->sd_quota_mutex);
}

static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
        const struct gfs2_quota *str = buf;

        qu->qu_limit = be64_to_cpu(str->qu_limit);
        qu->qu_warn = be64_to_cpu(str->qu_warn);
        qu->qu_value = be64_to_cpu(str->qu_value);
        qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
}

static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
        struct gfs2_quota *str = buf;

        str->qu_limit = cpu_to_be64(qu->qu_limit);
        str->qu_warn = cpu_to_be64(qu->qu_warn);
        str->qu_value = cpu_to_be64(qu->qu_value);
        str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
        memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}

/**
 * gfs2_adjust_quota
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd)
{
        struct inode *inode = &ip->i_inode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long index = loc >> PAGE_CACHE_SHIFT;
        unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize, iblock, pos;
        struct buffer_head *bh;
        struct page *page;
        void *kaddr;
        char *ptr;
        struct gfs2_quota_host qp;
        s64 value;
        int err = -EIO;

        if (gfs2_is_stuffed(ip))
                gfs2_unstuff_dinode(ip, NULL);

        page = grab_cache_page(mapping, index);
        if (!page)
                return -ENOMEM;

        blocksize = inode->i_sb->s_blocksize;
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        bh = page_buffers(page);
        pos = blocksize;
        while (offset >= pos) {
                bh = bh->b_this_page;
                iblock++;
                pos += blocksize;
        }

        if (!buffer_mapped(bh)) {
                gfs2_block_map(inode, iblock, bh, 1);
                if (!buffer_mapped(bh))
                        goto unlock;
        }

        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ_META, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto unlock;
        }

        gfs2_trans_add_bh(ip->i_gl, bh, 0);

        kaddr = kmap_atomic(page, KM_USER0);
        ptr = kaddr + offset;
        gfs2_quota_in(&qp, ptr);
        qp.qu_value += change;
        value = qp.qu_value;
        gfs2_quota_out(&qp, ptr);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
        err = 0;
        qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
        qd->qd_qb.qb_value = cpu_to_be64(value);
        ((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
        ((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
unlock:
        unlock_page(page);
        page_cache_release(page);
        return err;
}

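/*
 * do_sync() folds a batch of local changes into the shared quota file.
 * The per-ID glocks are acquired in sort_qd() order so that concurrent
 * syncs take them in a consistent sequence, then a single transaction
 * rewrites each struct gfs2_quota, and do_qc() is called with the
 * negated delta to zero out the local change that was just synced.
 */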
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
        struct gfs2_quota_data *qd;
        loff_t offset;
        unsigned int nalloc = 0, blocks;
        struct gfs2_alloc *al = NULL;
        int error;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                               &data_blocks, &ind_blocks);

        ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
        if (!ghs)
                return -ENOMEM;

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl,
                                           LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
                if (error)
                        goto out;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out;

        for (x = 0; x < num_qd; x++) {
                int alloc_required;

                offset = qd2offset(qda[x]);
                error = gfs2_write_alloc_required(ip, offset,
                                                  sizeof(struct gfs2_quota),
                                                  &alloc_required);
                if (error)
                        goto out_gunlock;
                if (alloc_required)
                        nalloc++;
        }

        al = gfs2_alloc_get(ip);
        if (!al) {
                error = -ENOMEM;
                goto out_gunlock;
        }
        /*
         * 1 blk for unstuffing inode if stuffed. We add this extra
         * block to the reservation unconditionally. If the inode
         * doesn't need unstuffing, the block will be released to the
         * rgrp since it won't be allocated during the transaction
         */
        al->al_requested = 1;
        /* +1 at the end for the block requested above for unstuffing */
        blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;

        if (nalloc)
                al->al_requested += nalloc * (data_blocks + ind_blocks);
        error = gfs2_inplace_reserve(ip);
        if (error)
                goto out_alloc;

        if (nalloc)
                blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;

        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
                goto out_ipres;

        for (x = 0; x < num_qd; x++) {
                qd = qda[x];
                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd);
                if (error)
                        goto out_end_trans;

                do_qc(qd, -qd->qd_change_sync);
        }

        error = 0;

out_end_trans:
        gfs2_trans_end(sdp);
out_ipres:
        gfs2_inplace_release(ip);
out_alloc:
        gfs2_alloc_put(ip);
out_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
out:
        while (qx--)
                gfs2_glock_dq_uninit(&ghs[qx]);
        kfree(ghs);
        gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
        return error;
}

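/*
 * do_glock() makes the LVB cache for a quota ID usable: it takes the
 * glock shared and, if the LVB is stale (or a refresh is forced),
 * upgrades to exclusive and re-reads the struct gfs2_quota from the
 * quota file into the LVB. If another node is then waiting on the
 * lock, it drops the lock and restarts with a shared request.
 */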
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_holder i_gh;
        struct gfs2_quota_host q;
        char buf[sizeof(struct gfs2_quota)];
        int error;
        struct gfs2_quota_lvb *qlvb;

restart:
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
        if (error)
                return error;

        qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

        if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
                loff_t pos;
                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl,
                                           LM_ST_EXCLUSIVE, GL_NOCACHE,
                                           q_gh);
                if (error)
                        return error;

                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
                if (error)
                        goto fail;

                memset(buf, 0, sizeof(struct gfs2_quota));
                pos = qd2offset(qd);
                error = gfs2_internal_read(ip, NULL, buf, &pos,
                                           sizeof(struct gfs2_quota));
                if (error < 0)
                        goto fail_gunlock;

                gfs2_glock_dq_uninit(&i_gh);

                gfs2_quota_in(&q, buf);
                qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
                qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
                qlvb->__pad = 0;
                qlvb->qb_limit = cpu_to_be64(q.qu_limit);
                qlvb->qb_warn = cpu_to_be64(q.qu_warn);
                qlvb->qb_value = cpu_to_be64(q.qu_value);
                qd->qd_qb = *qlvb;

                if (gfs2_glock_is_blocking(qd->qd_gl)) {
                        gfs2_glock_dq_uninit(q_gh);
                        force_refresh = 0;
                        goto restart;
                }
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        gfs2_glock_dq_uninit(q_gh);
        return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        unsigned int x;
        int error = 0;

        error = gfs2_quota_hold(ip, uid, gid);
        if (error)
                return error;

        if (capable(CAP_SYS_RESOURCE) ||
            sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
             sort_qd, NULL);

        for (x = 0; x < al->al_qd_num; x++) {
                error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
                if (error)
                        break;
        }

        if (!error)
                set_bit(GIF_QD_LOCKED, &ip->i_flags);
        else {
                while (x--)
                        gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
                gfs2_quota_unhold(ip);
        }

        return error;
}

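/*
 * need_sync() decides whether this node's pending change is big enough
 * to warrant a sync. In effect it tests (sketching the math rather than
 * quoting a comment from the sources):
 *
 *      qb_value + change * num_journals * scale_num / scale_den >= qb_limit
 *
 * i.e. the local delta is assumed to be mirrored on every journal and
 * scaled by quota_scale before being compared against the limit. Negative
 * deltas, and IDs already at or over their limit, never trigger a sync.
 */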
static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;
        s64 value;
        unsigned int num, den;
        int do_sync = 1;

        if (!qd->qd_qb.qb_limit)
                return 0;

        spin_lock(&sdp->sd_quota_spin);
        value = qd->qd_change;
        spin_unlock(&sdp->sd_quota_spin);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        if (value < 0)
                do_sync = 0;
        else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
                 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                do_sync = 0;
        else {
                value *= gfs2_jindex_size(sdp) * num;
                value = div_s64(value, den);
                value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
                if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                        do_sync = 0;
        }

        return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;
        unsigned int x;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
                goto out;

        for (x = 0; x < al->al_qd_num; x++) {
                struct gfs2_quota_data *qd;
                int sync;

                qd = al->al_qd[x];
                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

                if (sync && qd_trylock(qd))
                        qda[count++] = qd;
        }

        if (count) {
                do_sync(count, qda);
                for (x = 0; x < count; x++)
                        qd_unlock(qda[x]);
        }

out:
        gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
               sdp->sd_fsname, type,
               (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
               qd->qd_id);

        return 0;
}

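/*
 * gfs2_quota_check() combines the cluster-wide value cached in the LVB
 * with this node's unsynced qd_change. Crossing qb_limit fails the
 * allocation with -EDQUOT; crossing qb_warn only logs a rate-limited
 * warning, at most once per gt_quota_warn_period seconds.
 */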
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data *qd;
        s64 value;
        unsigned int x;
        int error = 0;

        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
                return 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        for (x = 0; x < al->al_qd_num; x++) {
                qd = al->al_qd[x];

                if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
                        continue;

                value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
                spin_lock(&sdp->sd_quota_spin);
                value += qd->qd_change;
                spin_unlock(&sdp->sd_quota_spin);

                if (be64_to_cpu(qd->qd_qb.qb_limit) &&
                    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
                        print_message(qd, "exceeded");
                        error = -EDQUOT;
                        break;
                } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
                           (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp, gt_quota_warn_period) * HZ)) {
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
                }
        }

        return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
                       u32 uid, u32 gid)
{
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data *qd;
        unsigned int x;

        if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
                return;
        if (ip->i_diskflags & GFS2_DIF_SYSTEM)
                return;

        for (x = 0; x < al->al_qd_num; x++) {
                qd = al->al_qd[x];

                if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
                        do_qc(qd, change);
                }
        }
}

int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
        struct gfs2_quota_data **qda;
        unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
        unsigned int num_qd;
        unsigned int x;
        int error = 0;

        sdp->sd_quota_sync_gen++;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
        if (!qda)
                return -ENOMEM;

        do {
                num_qd = 0;

                for (;;) {
                        error = qd_fish(sdp, qda + num_qd);
                        if (error || !qda[num_qd])
                                break;
                        if (++num_qd == max_qd)
                                break;
                }

                if (num_qd) {
                        if (!error)
                                error = do_sync(num_qd, qda);
                        if (!error)
                                for (x = 0; x < num_qd; x++)
                                        qda[x]->qd_sync_gen =
                                                sdp->sd_quota_sync_gen;

                        for (x = 0; x < num_qd; x++)
                                qd_unlock(qda[x]);
                }
        } while (!error && num_qd == max_qd);

        kfree(qda);

        return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        error = qd_get(sdp, user, id, CREATE, &qd);
        if (error)
                return error;

        error = do_glock(qd, FORCE, &q_gh);
        if (!error)
                gfs2_glock_dq_uninit(&q_gh);

        qd_put(qd);

        return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
        const struct gfs2_quota_change *str = buf;

        qc->qc_change = be64_to_cpu(str->qc_change);
        qc->qc_flags = be32_to_cpu(str->qc_flags);
        qc->qc_id = be32_to_cpu(str->qc_id);
}

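/*
 * At mount time gfs2_quota_init() replays the per-node quota-change file:
 * it sizes the slot bitmap from the file's length, walks every on-disk
 * gfs2_quota_change entry, and recreates an in-memory gfs2_quota_data
 * (with QDF_CHANGE set) for each nonzero change left over from before
 * the last unmount or crash.
 */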
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        u64 dblock;
        u32 extlen = 0;
        int error;

        if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
            ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
                gfs2_consist_inode(ip);
                return -EIO;
        }
        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

        error = -ENOMEM;

        sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
                                       sizeof(unsigned char *), GFP_NOFS);
        if (!sdp->sd_quota_bitmap)
                return error;

        for (x = 0; x < sdp->sd_quota_chunks; x++) {
                sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
                if (!sdp->sd_quota_bitmap[x])
                        goto fail;
        }

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;
                unsigned int y;

                if (!extlen) {
                        int new = 0;
                        error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
                        if (error)
                                goto fail;
                }
                error = -EIO;
                bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
                if (!bh)
                        goto fail;
                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
                        brelse(bh);
                        goto fail;
                }

                for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
                     y++, slot++) {
                        struct gfs2_quota_change_host qc;
                        struct gfs2_quota_data *qd;

                        gfs2_quota_change_in(&qc, bh->b_data +
                                             sizeof(struct gfs2_meta_header) +
                                             y * sizeof(struct gfs2_quota_change));
                        if (!qc.qc_change)
                                continue;

                        error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
                                         qc.qc_id, &qd);
                        if (error) {
                                brelse(bh);
                                goto fail;
                        }

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc.qc_change;
                        qd->qd_slot = slot;
                        qd->qd_slot_count = 1;
                        qd->qd_last_touched = jiffies;

                        spin_lock(&sdp->sd_quota_spin);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&sdp->sd_quota_spin);

                        found++;
                }

                brelse(bh);
                dblock++;
                extlen--;
        }

        if (found)
                fs_info(sdp, "found %u quota changes\n", found);

        return 0;

fail:
        gfs2_quota_cleanup(sdp);
        return error;
}

static void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
        struct gfs2_quota_data *qd, *safe;
        LIST_HEAD(dead);

        spin_lock(&sdp->sd_quota_spin);
        list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
                if (!qd->qd_count &&
                    time_after_eq(jiffies, qd->qd_last_touched +
                                  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
                        list_move(&qd->qd_list, &dead);
                        gfs2_assert_warn(sdp,
                                         atomic_read(&sdp->sd_quota_count) > 0);
                        atomic_dec(&sdp->sd_quota_count);
                }
        }
        spin_unlock(&sdp->sd_quota_spin);

        while (!list_empty(&dead)) {
                qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
                list_del(&qd->qd_list);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);
                kmem_cache_free(gfs2_quotad_cachep, qd);
        }
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;
        unsigned int x;

        spin_lock(&sdp->sd_quota_spin);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

                if (qd->qd_count > 1 ||
                    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        list_move(&qd->qd_list, head);
                        spin_unlock(&sdp->sd_quota_spin);
                        schedule();
                        spin_lock(&sdp->sd_quota_spin);
                        continue;
                }

                list_del(&qd->qd_list);
                atomic_dec(&sdp->sd_quota_count);
                spin_unlock(&sdp->sd_quota_spin);

                if (!qd->qd_count) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);
                } else
                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);
                kmem_cache_free(gfs2_quotad_cachep, qd);

                spin_lock(&sdp->sd_quota_spin);
        }
        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

        if (sdp->sd_quota_bitmap) {
                for (x = 0; x < sdp->sd_quota_chunks; x++)
                        kfree(sdp->sd_quota_bitmap[x]);
                kfree(sdp->sd_quota_bitmap);
        }
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
        if (error == 0 || error == -EROFS)
                return;
        if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
                               int (*fxn)(struct gfs2_sbd *sdp),
                               unsigned long t, unsigned long *timeo,
                               unsigned int *new_timeo)
{
        if (t >= *timeo) {
                int error = fxn(sdp);
                quotad_error(sdp, msg, error);
                *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
        } else {
                *timeo -= t;
        }
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        while (1) {
                ip = NULL;
                spin_lock(&sdp->sd_trunc_lock);
                if (!list_empty(&sdp->sd_trunc_list)) {
                        ip = list_entry(sdp->sd_trunc_list.next,
                                        struct gfs2_inode, i_trunc_list);
                        list_del_init(&ip->i_trunc_list);
                }
                spin_unlock(&sdp->sd_trunc_lock);
                if (ip == NULL)
                        return;
                gfs2_glock_finish_truncate(ip);
        }
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to the GFS2 superblock
 *
 */
int gfs2_quotad(void *data)
{
        struct gfs2_sbd *sdp = data;
        struct gfs2_tune *tune = &sdp->sd_tune;
        unsigned long statfs_timeo = 0;
        unsigned long quotad_timeo = 0;
        unsigned long t = 0;
        DEFINE_WAIT(wait);
        int empty;

        while (!kthread_should_stop()) {

                /* Update the master statfs file */
                quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
                                   &statfs_timeo, &tune->gt_statfs_quantum);

                /* Update quota file */
                quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                                   &quotad_timeo, &tune->gt_quota_quantum);

                /* FIXME: This should be turned into a shrinker */
                gfs2_quota_scan(sdp);

                /* Check for & recover partially truncated inodes */
                quotad_check_trunc_list(sdp);

                if (freezing(current))
                        refrigerator();
                t = min(quotad_timeo, statfs_timeo);

                prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_lock(&sdp->sd_trunc_lock);
                empty = list_empty(&sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                if (empty)
                        t -= schedule_timeout(t);
                else
                        t = 0;
                finish_wait(&sdp->sd_quota_wait, &wait);
        }

        return 0;
}