
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes with infinite bandwidth) to twice the
 * user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not constantly read.
 */
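
/*
 * A worked example of the scaling above (illustrative numbers only): the
 * sync decision in need_sync() multiplies this node's unsynced change by
 * the number of journals (nodes) and by quota_scale before comparing the
 * projection against the limit. With quota_scale = 1/1, four journals, a
 * limit of 1000 blocks, a last-synced value of 900 blocks, and a local
 * change of +20 blocks, the projection is 900 + 20 * 4 * 1/1 = 980 < 1000,
 * so the change may keep sitting locally. Once the local change reaches
 * +25 blocks the projection hits the limit and the change is synced ahead
 * of the quota_quantum timeout.
 */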
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_change_host {
        u64 qc_change;
        u32 qc_flags; /* GFS2_QCF_... */
        u32 qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
{
        struct gfs2_quota_data *qd;
        struct gfs2_sbd *sdp;

        if (nr == 0)
                goto out;

        if (!(gfp_mask & __GFP_FS))
                return -1;

        spin_lock(&qd_lru_lock);
        while (nr && !list_empty(&qd_lru_list)) {
                qd = list_entry(qd_lru_list.next,
                                struct gfs2_quota_data, qd_reclaim);
                sdp = qd->qd_gl->gl_sbd;

                /* Free from the filesystem-specific list */
                list_del(&qd->qd_list);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_glock_put(qd->qd_gl);
                atomic_dec(&sdp->sd_quota_count);

                /* Delete it from the common reclaim list */
                list_del_init(&qd->qd_reclaim);
                atomic_dec(&qd_lru_count);
                spin_unlock(&qd_lru_lock);
                kmem_cache_free(gfs2_quotad_cachep, qd);
                spin_lock(&qd_lru_lock);
                nr--;
        }
        spin_unlock(&qd_lru_lock);

out:
        return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}
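
/*
 * The quota file interleaves one user and one group record per ID: user
 * records sit at even record indices and group records at odd ones. For
 * example, user 0 lives at offset 0, group 0 at sizeof(struct gfs2_quota),
 * and user 1 at 2 * sizeof(struct gfs2_quota).
 */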
static u64 qd2offset(struct gfs2_quota_data *qd)
{
        u64 offset;

        offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
        offset *= sizeof(struct gfs2_quota);

        return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
                    struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd;
        int error;

        qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
        if (!qd)
                return -ENOMEM;

        atomic_set(&qd->qd_count, 1);
        qd->qd_id = id;
        if (user)
                set_bit(QDF_USER, &qd->qd_flags);
        qd->qd_slot = -1;
        INIT_LIST_HEAD(&qd->qd_reclaim);

        error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
                               &gfs2_quota_glops, CREATE, &qd->qd_gl);
        if (error)
                goto fail;

        *qdp = qd;

        return 0;

fail:
        kmem_cache_free(gfs2_quotad_cachep, qd);
        return error;
}

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
        int error, found;

        *qdp = NULL;

        for (;;) {
                found = 0;
                spin_lock(&qd_lru_lock);
                list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                        if (qd->qd_id == id &&
                            !test_bit(QDF_USER, &qd->qd_flags) == !user) {
                                if (!atomic_read(&qd->qd_count) &&
                                    !list_empty(&qd->qd_reclaim)) {
                                        /* Remove it from reclaim list */
                                        list_del_init(&qd->qd_reclaim);
                                        atomic_dec(&qd_lru_count);
                                }
                                atomic_inc(&qd->qd_count);
                                found = 1;
                                break;
                        }
                }

                if (!found)
                        qd = NULL;

                if (!qd && new_qd) {
                        qd = new_qd;
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        new_qd = NULL;
                }

                spin_unlock(&qd_lru_lock);

                if (qd) {
                        if (new_qd) {
                                gfs2_glock_put(new_qd->qd_gl);
                                kmem_cache_free(gfs2_quotad_cachep, new_qd);
                        }
                        *qdp = qd;
                        return 0;
                }

                error = qd_alloc(sdp, user, id, &new_qd);
                if (error)
                        return error;
        }
}

static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        gfs2_assert(sdp, atomic_read(&qd->qd_count));
        atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
        if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
                /* Add to the reclaim list */
                list_add_tail(&qd->qd_reclaim, &qd_lru_list);
                atomic_inc(&qd_lru_count);
                spin_unlock(&qd_lru_lock);
        }
}
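
/*
 * Each chunk of the slot bitmap covers 8 * PAGE_SIZE slots, one bit per
 * slot. Assuming 4096-byte pages, slot 32770 therefore lands in chunk 1,
 * byte 0, bit 2 (32770 = 1 * 32768 + 0 * 8 + 2), matching the c/o/b
 * arithmetic in slot_get() below.
 */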
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        spin_lock(&qd_lru_lock);

        if (qd->qd_slot_count++) {
                spin_unlock(&qd_lru_lock);
                return 0;
        }

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];
                        if (byte != 0xFF)
                                goto found;
                }

        goto fail;

found:
        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))
                        break;
        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if (qd->qd_slot >= sdp->sd_quota_slots)
                goto fail;

        sdp->sd_quota_bitmap[c][o] |= 1 << b;

        spin_unlock(&qd_lru_lock);

        return 0;

fail:
        qd->qd_slot_count--;
        spin_unlock(&qd_lru_lock);
        return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&qd_lru_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
        spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&qd_lru_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
                qd->qd_slot = -1;
        }
        spin_unlock(&qd_lru_lock);
}

static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int block, offset;
        struct buffer_head *bh;
        int error;
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

        mutex_lock(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                mutex_unlock(&sdp->sd_quota_mutex);
                return 0;
        }

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        bh_map.b_size = 1 << ip->i_inode.i_blkbits;
        error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
        if (error)
                goto fail;
        error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
        if (error)
                goto fail;
        error = -EIO;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
                goto fail_brelse;

        qd->qd_bh = bh;
        qd->qd_bh_qc = (struct gfs2_quota_change *)
                (bh->b_data + sizeof(struct gfs2_meta_header) +
                 offset * sizeof(struct gfs2_quota_change));

        mutex_unlock(&sdp->sd_quota_mutex);

        return 0;

fail_brelse:
        brelse(bh);
fail:
        qd->qd_bh_count--;
        mutex_unlock(&sdp->sd_quota_mutex);
        return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {
                brelse(qd->qd_bh);
                qd->qd_bh = NULL;
                qd->qd_bh_qc = NULL;
        }
        mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;
        int error;
        int found = 0;

        *qdp = NULL;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&qd_lru_lock);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
                    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
                    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
                        continue;

                list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

                set_bit(QDF_LOCKED, &qd->qd_flags);
                gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
                atomic_inc(&qd->qd_count);
                qd->qd_change_sync = qd->qd_change;
                gfs2_assert_warn(sdp, qd->qd_slot_count);
                qd->qd_slot_count++;
                found = 1;

                break;
        }

        if (!found)
                qd = NULL;

        spin_unlock(&qd_lru_lock);

        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
                error = bh_get(qd);
                if (error) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        return error;
                }
        }

        *qdp = qd;

        return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&qd_lru_lock);

        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                spin_unlock(&qd_lru_lock);
                return 0;
        }

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

        set_bit(QDF_LOCKED, &qd->qd_flags);
        gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
        atomic_inc(&qd->qd_count);
        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;

        spin_unlock(&qd_lru_lock);

        gfs2_assert_warn(sdp, qd->qd_change_sync);
        if (bh_get(qd)) {
                clear_bit(QDF_LOCKED, &qd->qd_flags);
                slot_put(qd);
                qd_put(qd);
                return 0;
        }

        return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
                    struct gfs2_quota_data **qdp)
{
        int error;

        error = qd_get(sdp, user, id, qdp);
        if (error)
                return error;

        error = slot_get(*qdp);
        if (error)
                goto fail;

        error = bh_get(*qdp);
        if (error)
                goto fail_slot;

        return 0;

fail_slot:
        slot_put(*qdp);
fail:
        qd_put(*qdp);
        return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data **qd = al->al_qd;
        int error;

        if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
                return -EIO;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;

        error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;

        if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
                error = qdsb_get(sdp, QUOTA_USER, uid, qd);
                if (error)
                        goto out;
                al->al_qd_num++;
                qd++;
        }

        if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
                error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
                if (error)
                        goto out;
                al->al_qd_num++;
                qd++;
        }

out:
        if (error)
                gfs2_quota_unhold(ip);
        return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        unsigned int x;

        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < al->al_qd_num; x++) {
                qdsb_put(al->al_qd[x]);
                al->al_qd[x] = NULL;
        }
        al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
        const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
        const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

        if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
            !test_bit(QDF_USER, &qd_b->qd_flags)) {
                if (test_bit(QDF_USER, &qd_a->qd_flags))
                        return -1;
                else
                        return 1;
        }
        if (qd_a->qd_id < qd_b->qd_id)
                return -1;
        if (qd_a->qd_id > qd_b->qd_id)
                return 1;

        return 0;
}
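
/*
 * do_qc() folds a change into this ID's record in the per-node
 * quota_change file. The first nonzero total takes a reference and a
 * bitmap slot (QDF_CHANGE set); when the running total returns to zero,
 * the record is cleared and both are released, so only IDs with pending
 * changes stay pinned in memory.
 */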
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        struct gfs2_quota_change *qc = qd->qd_bh_qc;
        s64 x;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
                qc->qc_change = 0;
                qc->qc_flags = 0;
                if (test_bit(QDF_USER, &qd->qd_flags))
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(qd->qd_id);
        }

        x = be64_to_cpu(qc->qc_change) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&qd_lru_lock);
        qd->qd_change = x;
        spin_unlock(&qd_lru_lock);

        if (!x) {
                gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
                clear_bit(QDF_CHANGE, &qd->qd_flags);
                qc->qc_flags = 0;
                qc->qc_id = 0;
                slot_put(qd);
                qd_put(qd);
        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
                qd_hold(qd);
                slot_hold(qd);
        }

        mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of change to record
 * @qd: The quota data
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd)
{
        struct inode *inode = &ip->i_inode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long index = loc >> PAGE_CACHE_SHIFT;
        unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize, iblock, pos;
        struct buffer_head *bh;
        struct page *page;
        void *kaddr;
        struct gfs2_quota *qp;
        s64 value;
        int err = -EIO;

        if (gfs2_is_stuffed(ip))
                gfs2_unstuff_dinode(ip, NULL);

        page = grab_cache_page(mapping, index);
        if (!page)
                return -ENOMEM;

        blocksize = inode->i_sb->s_blocksize;
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        bh = page_buffers(page);
        pos = blocksize;
        while (offset >= pos) {
                bh = bh->b_this_page;
                iblock++;
                pos += blocksize;
        }

        if (!buffer_mapped(bh)) {
                gfs2_block_map(inode, iblock, bh, 1);
                if (!buffer_mapped(bh))
                        goto unlock;
        }

        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ_META, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto unlock;
        }

        gfs2_trans_add_bh(ip->i_gl, bh, 0);

        kaddr = kmap_atomic(page, KM_USER0);
        qp = kaddr + offset;
        value = (s64)be64_to_cpu(qp->qu_value) + change;
        qp->qu_value = cpu_to_be64(value);
        qd->qd_qb.qb_value = qp->qu_value;
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
        err = 0;
unlock:
        unlock_page(page);
        page_cache_release(page);
        return err;
}
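
/*
 * do_sync() flushes a batch of pending changes into the global quota
 * file: it acquires an exclusive glock per ID plus one on the quota
 * inode, reserves blocks for any records that need allocating, then
 * adjusts each on-disk record and backs the same amount out of the
 * per-node change file via do_qc().
 */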
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
        struct gfs2_quota_data *qd;
        loff_t offset;
        unsigned int nalloc = 0, blocks;
        struct gfs2_alloc *al = NULL;
        int error;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                               &data_blocks, &ind_blocks);

        ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
        if (!ghs)
                return -ENOMEM;

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
                if (error)
                        goto out;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out;

        for (x = 0; x < num_qd; x++) {
                int alloc_required;

                offset = qd2offset(qda[x]);
                error = gfs2_write_alloc_required(ip, offset,
                                                  sizeof(struct gfs2_quota),
                                                  &alloc_required);
                if (error)
                        goto out_gunlock;
                if (alloc_required)
                        nalloc++;
        }

        al = gfs2_alloc_get(ip);
        if (!al) {
                error = -ENOMEM;
                goto out_gunlock;
        }
        /*
         * 1 blk for unstuffing inode if stuffed. We add this extra
         * block to the reservation unconditionally. If the inode
         * doesn't need unstuffing, the block will be released to the
         * rgrp since it won't be allocated during the transaction
         */
        al->al_requested = 1;
        /* +1 at the end for the block requested above for unstuffing */
        blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;

        if (nalloc)
                al->al_requested += nalloc * (data_blocks + ind_blocks);

        error = gfs2_inplace_reserve(ip);
        if (error)
                goto out_alloc;

        if (nalloc)
                blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;

        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
                goto out_ipres;

        for (x = 0; x < num_qd; x++) {
                qd = qda[x];
                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd);
                if (error)
                        goto out_end_trans;

                do_qc(qd, -qd->qd_change_sync);
        }

        error = 0;

out_end_trans:
        gfs2_trans_end(sdp);
out_ipres:
        gfs2_inplace_release(ip);
out_alloc:
        gfs2_alloc_put(ip);
out_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
out:
        while (qx--)
                gfs2_glock_dq_uninit(&ghs[qx]);
        kfree(ghs);
        gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
        return error;
}
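
/*
 * do_glock() is where the per-ID LVB caching described at the top of
 * this file comes in: a shared glock is normally enough, and the quota
 * file is only read (under an exclusive glock) when the LVB is stale or
 * a refresh is forced, after which the limit/warn/value triple is cached
 * in the LVB for every node holding the lock.
 */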
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_holder i_gh;
        struct gfs2_quota q;
        int error;
        struct gfs2_quota_lvb *qlvb;

restart:
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
        if (error)
                return error;

        qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

        if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
                loff_t pos;
                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, q_gh);
                if (error)
                        return error;

                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
                if (error)
                        goto fail;

                memset(&q, 0, sizeof(struct gfs2_quota));
                pos = qd2offset(qd);
                error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
                if (error < 0)
                        goto fail_gunlock;
                if ((error < sizeof(q)) && force_refresh) {
                        error = -ENOENT;
                        goto fail_gunlock;
                }
                gfs2_glock_dq_uninit(&i_gh);

                qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
                qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
                qlvb->__pad = 0;
                qlvb->qb_limit = q.qu_limit;
                qlvb->qb_warn = q.qu_warn;
                qlvb->qb_value = q.qu_value;
                qd->qd_qb = *qlvb;

                gfs2_glock_dq_uninit(q_gh);
                force_refresh = 0;
                goto restart;
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        gfs2_glock_dq_uninit(q_gh);
        return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        unsigned int x;
        int error = 0;

        gfs2_quota_hold(ip, uid, gid);

        if (capable(CAP_SYS_RESOURCE) ||
            sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
             sort_qd, NULL);

        for (x = 0; x < al->al_qd_num; x++) {
                error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
                if (error)
                        break;
        }

        if (!error)
                set_bit(GIF_QD_LOCKED, &ip->i_flags);
        else {
                while (x--)
                        gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
                gfs2_quota_unhold(ip);
        }

        return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;
        s64 value;
        unsigned int num, den;
        int do_sync = 1;

        if (!qd->qd_qb.qb_limit)
                return 0;

        spin_lock(&qd_lru_lock);
        value = qd->qd_change;
        spin_unlock(&qd_lru_lock);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        if (value < 0)
                do_sync = 0;
        else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
                 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                do_sync = 0;
        else {
                value *= gfs2_jindex_size(sdp) * num;
                value = div_s64(value, den);
                value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
                if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                        do_sync = 0;
        }

        return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;
        unsigned int x;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
                goto out;

        for (x = 0; x < al->al_qd_num; x++) {
                struct gfs2_quota_data *qd;
                int sync;

                qd = al->al_qd[x];
                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

                if (sync && qd_trylock(qd))
                        qda[count++] = qd;
        }

        if (count) {
                do_sync(count, qda);
                for (x = 0; x < count; x++)
                        qd_unlock(qda[x]);
        }

out:
        gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
               sdp->sd_fsname, type,
               (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
               qd->qd_id);

        return 0;
}
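
/*
 * Enforcement compares the limit and warn values against the last-synced
 * value plus this node's unsynced change. For example, with a limit of
 * 100 blocks, a synced value of 95, and a local change of +10, the check
 * sees 105 > 100 and returns -EDQUOT.
 */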
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data *qd;
        s64 value;
        unsigned int x;
        int error = 0;

        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
                return 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        for (x = 0; x < al->al_qd_num; x++) {
                qd = al->al_qd[x];

                if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
                        continue;

                value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
                spin_lock(&qd_lru_lock);
                value += qd->qd_change;
                spin_unlock(&qd_lru_lock);

                if (be64_to_cpu(qd->qd_qb.qb_limit) &&
                    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
                        print_message(qd, "exceeded");
                        error = -EDQUOT;
                        break;
                } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
                           (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp,
                                                       gt_quota_warn_period) * HZ)) {
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
                }
        }

        return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
                       u32 uid, u32 gid)
{
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data *qd;
        unsigned int x;

        if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
                return;
        if (ip->i_diskflags & GFS2_DIF_SYSTEM)
                return;

        for (x = 0; x < al->al_qd_num; x++) {
                qd = al->al_qd[x];

                if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
                        do_qc(qd, change);
                }
        }
}

int gfs2_quota_sync(struct super_block *sb, int type)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_data **qda;
        unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
        unsigned int num_qd;
        unsigned int x;
        int error = 0;

        sdp->sd_quota_sync_gen++;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
        if (!qda)
                return -ENOMEM;

        do {
                num_qd = 0;

                for (;;) {
                        error = qd_fish(sdp, qda + num_qd);
                        if (error || !qda[num_qd])
                                break;
                        if (++num_qd == max_qd)
                                break;
                }

                if (num_qd) {
                        if (!error)
                                error = do_sync(num_qd, qda);
                        if (!error)
                                for (x = 0; x < num_qd; x++)
                                        qda[x]->qd_sync_gen =
                                                sdp->sd_quota_sync_gen;

                        for (x = 0; x < num_qd; x++)
                                qd_unlock(qda[x]);
                }
        } while (!error && num_qd == max_qd);

        kfree(qda);

        return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        error = qd_get(sdp, user, id, &qd);
        if (error)
                return error;

        error = do_glock(qd, FORCE, &q_gh);
        if (!error)
                gfs2_glock_dq_uninit(&q_gh);

        qd_put(qd);

        return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
        const struct gfs2_quota_change *str = buf;

        qc->qc_change = be64_to_cpu(str->qc_change);
        qc->qc_flags = be32_to_cpu(str->qc_flags);
        qc->qc_id = be32_to_cpu(str->qc_id);
}

int gfs2_quota_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        u64 dblock;
        u32 extlen = 0;
        int error;

        if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
            ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
                gfs2_consist_inode(ip);
                return -EIO;
        }
        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

        error = -ENOMEM;

        sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
                                       sizeof(unsigned char *), GFP_NOFS);
        if (!sdp->sd_quota_bitmap)
                return error;

        for (x = 0; x < sdp->sd_quota_chunks; x++) {
                sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
                if (!sdp->sd_quota_bitmap[x])
                        goto fail;
        }

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;
                unsigned int y;

                if (!extlen) {
                        int new = 0;
                        error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
                        if (error)
                                goto fail;
                }
                error = -EIO;
                bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
                if (!bh)
                        goto fail;
                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
                        brelse(bh);
                        goto fail;
                }

                for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
                     y++, slot++) {
                        struct gfs2_quota_change_host qc;
                        struct gfs2_quota_data *qd;

                        gfs2_quota_change_in(&qc, bh->b_data +
                                             sizeof(struct gfs2_meta_header) +
                                             y * sizeof(struct gfs2_quota_change));
                        if (!qc.qc_change)
                                continue;

                        error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
                                         qc.qc_id, &qd);
                        if (error) {
                                brelse(bh);
                                goto fail;
                        }

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc.qc_change;
                        qd->qd_slot = slot;
                        qd->qd_slot_count = 1;

                        spin_lock(&qd_lru_lock);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&qd_lru_lock);

                        found++;
                }

                brelse(bh);
                dblock++;
                extlen--;
        }

        if (found)
                fs_info(sdp, "found %u quota changes\n", found);

        return 0;

fail:
        gfs2_quota_cleanup(sdp);
        return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;
        unsigned int x;

        spin_lock(&qd_lru_lock);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

                if (atomic_read(&qd->qd_count) > 1 ||
                    (atomic_read(&qd->qd_count) &&
                     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        list_move(&qd->qd_list, head);
                        spin_unlock(&qd_lru_lock);
                        schedule();
                        spin_lock(&qd_lru_lock);
                        continue;
                }

                list_del(&qd->qd_list);
                /* Also remove if this qd exists in the reclaim list */
                if (!list_empty(&qd->qd_reclaim)) {
                        list_del_init(&qd->qd_reclaim);
                        atomic_dec(&qd_lru_count);
                }
                atomic_dec(&sdp->sd_quota_count);
                spin_unlock(&qd_lru_lock);

                if (!atomic_read(&qd->qd_count)) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);
                } else
                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_glock_put(qd->qd_gl);
                kmem_cache_free(gfs2_quotad_cachep, qd);

                spin_lock(&qd_lru_lock);
        }
        spin_unlock(&qd_lru_lock);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

        if (sdp->sd_quota_bitmap) {
                for (x = 0; x < sdp->sd_quota_chunks; x++)
                        kfree(sdp->sd_quota_bitmap[x]);
                kfree(sdp->sd_quota_bitmap);
        }
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
        if (error == 0 || error == -EROFS)
                return;
        if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
                               int (*fxn)(struct super_block *sb, int type),
                               unsigned long t, unsigned long *timeo,
                               unsigned int *new_timeo)
{
        if (t >= *timeo) {
                int error = fxn(sdp->sd_vfs, 0);
                quotad_error(sdp, msg, error);
                *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
        } else {
                *timeo -= t;
        }
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        while (1) {
                ip = NULL;
                spin_lock(&sdp->sd_trunc_lock);
                if (!list_empty(&sdp->sd_trunc_list)) {
                        ip = list_entry(sdp->sd_trunc_list.next,
                                        struct gfs2_inode, i_trunc_list);
                        list_del_init(&ip->i_trunc_list);
                }
                spin_unlock(&sdp->sd_trunc_lock);
                if (ip == NULL)
                        return;
                gfs2_glock_finish_truncate(ip);
        }
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to the GFS2 superblock
 *
 */
int gfs2_quotad(void *data)
{
        struct gfs2_sbd *sdp = data;
        struct gfs2_tune *tune = &sdp->sd_tune;
        unsigned long statfs_timeo = 0;
        unsigned long quotad_timeo = 0;
        unsigned long t = 0;
        DEFINE_WAIT(wait);
        int empty;

        while (!kthread_should_stop()) {

                /* Update the master statfs file */
                quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
                                   &statfs_timeo, &tune->gt_statfs_quantum);

                /* Update quota file */
                quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                                   &quotad_timeo, &tune->gt_quota_quantum);

                /* Check for & recover partially truncated inodes */
                quotad_check_trunc_list(sdp);

                if (freezing(current))
                        refrigerator();
                t = min(quotad_timeo, statfs_timeo);

                prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
                spin_lock(&sdp->sd_trunc_lock);
                empty = list_empty(&sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                if (empty)
                        t -= schedule_timeout(t);
                else
                        t = 0;
                finish_wait(&sdp->sd_quota_wait, &wait);
        }

        return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
                                 struct fs_quota_stat *fqs)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;

        memset(fqs, 0, sizeof(struct fs_quota_stat));
        fqs->qs_version = FS_QSTAT_VERSION;
        if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
                fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
        else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
                fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
        if (sdp->sd_quota_inode) {
                fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
                fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
        }
        fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
        fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
        fqs->qs_incoredqs = atomic_read(&qd_lru_count);
        return 0;
}

const struct quotactl_ops gfs2_quotactl_ops = {
        .quota_sync     = gfs2_quota_sync,
        .get_xstate     = gfs2_quota_get_xstate,
};