quota.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes and infinite bandwidth) to twice the
 * user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	struct kqid qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;
	int nr_to_scan = sc->nr_to_scan;
	long freed = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	spin_lock(&qd_lru_lock);
	while (nr_to_scan && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr_to_scan--;
		freed++;
	}
	spin_unlock(&qd_lru_lock);
	return freed;
}

unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&qd_lru_count));
}
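
/*
 * Quota file layout: user and group entries are interleaved, with user
 * quotas at even indices and group quotas at odd ones. For example,
 * UID 1000 maps to index 2000 and GID 1000 to index 2001, each entry
 * sizeof(struct gfs2_quota) bytes apart in the quota file.
 */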
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}

static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qid_eq(qd->qd_id, qid)) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, qid, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}
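
/*
 * Each in-core quota change needs a slot in the per-node quota change
 * file. Slots are tracked in sd_quota_bitmap, an array of PAGE_SIZE
 * chunks; slot_get() scans for the first clear bit and maps it back to
 * a slot number as c * (8 * PAGE_SIZE) + o * 8 + b.
 */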
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}

static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
			     unsigned int bit, int new_value)
{
	unsigned int c, o, b = bit;
	int old_value;

	c = b / (8 * PAGE_SIZE);
	b %= 8 * PAGE_SIZE;
	o = b / 8;
	b %= 8;

	old_value = (bitmap[c][o] & (1 << b));
	gfs2_assert_withdraw(sdp, !old_value != !new_value);

	if (new_value)
		bitmap[c][o] |= 1 << b;
	else
		bitmap[c][o] &= ~(1 << b);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}
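
/*
 * bh_get()/bh_put() pin the buffer holding this quota data's slot in
 * the quota change file. The slot number is translated into a
 * (block, offset) pair via sd_qc_per_block, and qd_bh_qc is left
 * pointing at the gfs2_quota_change record inside the buffer.
 */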
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
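
/*
 * qd_fish() pulls the next quota data element that needs syncing: one
 * that is not already locked for sync, has a pending change, and has
 * not yet been synced in the current generation. On success it takes a
 * reference, a slot reference, and the buffer, so the caller balances
 * with qd_unlock().
 */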
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int x;

	if (ip->i_res == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qdsb_put(ip->i_res->rs_qa_qd[x]);
		ip->i_res->rs_qa_qd[x] = NULL;
	}
	ip->i_res->rs_qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}
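
/*
 * do_qc() applies a local allocation change to this node's quota
 * change file. The first change for an ID initializes its
 * gfs2_quota_change record and takes qd/slot references (QDF_CHANGE);
 * when the accumulated change returns to zero, the record and the
 * references are released again.
 */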
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q, *qp;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	qp = &q;
	qp->qu_value = be64_to_cpu(qp->qu_value);
	qp->qu_value += change;
	qp->qu_value = cpu_to_be64(qp->qu_value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = qp->qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = qp;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_data(ip->i_gl, bh);

	kaddr = kmap_atomic(page);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
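
/*
 * do_sync() flushes a batch of locally accumulated changes into the
 * global quota file. The affected quota glocks are acquired in sorted
 * order (plus the quota inode's glock) to avoid deadlocks between
 * nodes; each entry is then adjusted on disk and its local change
 * undone via do_qc().
 */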
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock(&ip->i_inode.i_mutex);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		int force = NO_FORCE;
		qd = ip->i_res->rs_qa_qd[x];
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
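
/*
 * need_sync() implements the "quota_scale" heuristic described at the
 * top of this file. The local change is scaled by the number of
 * journals (one per node) and by quota_scale_num/quota_scale_den; if
 * the scaled projection pushes the cached value past the hard limit,
 * the entry should be synced now rather than waiting for the quantum.
 */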
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (qd->qd_id.type == USRQUOTA) ? "user" : "group",
	       from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}
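
/*
 * gfs2_quota_check() enforces the limits for this inode's UID/GID.
 * Exceeding the hard limit (qb_limit) returns -EDQUOT; crossing the
 * soft limit (qb_warn) only produces a warning, rate-limited by the
 * quota_warn_period tunable.
 */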
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period) * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}
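
/*
 * gfs2_quota_sync() bumps the sync generation and then repeatedly
 * fishes out dirty quota entries, syncing them in batches of at most
 * quota_simul_sync entries until none remain.
 */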
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = make_kqid(&init_user_ns,
			      (qc->qc_flags & GFS2_QCF_USER) ? USRQUOTA : GRPQUOTA,
			      be32_to_cpu(str->qc_id));
}
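
/*
 * At mount time, gfs2_quota_init() walks the entire quota change file
 * and rebuilds the in-memory state: every non-zero change record gets
 * a gfs2_quota_data with QDF_CHANGE set, its slot marked in the
 * bitmap, and a place on the per-sb quota list.
 */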
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
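
/*
 * gfs2_quota_cleanup() tears the quota list down again at unmount.
 * Entries that are still referenced beyond the reference held for a
 * pending change are put back and retried after a schedule().
 */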
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}
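
/*
 * gfs2_get_dqblk() reports limits and usage through the XFS-style
 * quotactl interface. Values are stored in filesystem blocks but
 * reported in basic (512-byte) blocks, hence the sd_fsb2bb_shift
 * conversions here and in gfs2_set_dqblk() below.
 */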
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK	(FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_xstate	= gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};