/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "ops_file.h"
#include "ops_address.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0
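
/**
 * qd2offset - calculate an ID's byte offset in the quota file
 * @qd: The quota data
 *
 * User and group quotas for the same numeric ID are interleaved in the
 * quota file: the user entry sits at slot 2 * id and the group entry at
 * slot 2 * id + 1, each slot being sizeof(struct gfs2_quota) bytes.
 */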
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
	if (!qd)
		return -ENOMEM;

	qd->qd_count = 1;
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kfree(qd);
	return error;
}
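
/**
 * qd_get - look up a quota_data for an ID, optionally creating one
 *
 * Searches sd_quota_list under sd_quota_spin. Since qd_alloc() cannot be
 * called with the spinlock held, the allocation is done unlocked and the
 * list is then rechecked; a duplicate added in the meantime wins and the
 * fresh allocation is freed.
 */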
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&sdp->sd_quota_spin);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				qd->qd_count++;
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&sdp->sd_quota_spin);

		if (qd || !create) {
			if (new_qd) {
				gfs2_lvb_unhold(new_qd->qd_gl);
				kfree(new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	if (!--qd->qd_count)
		qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);
}
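
/**
 * slot_get - assign the quota_data a slot in the quota change file
 *
 * Each slot corresponds to one struct gfs2_quota_change in the per-node
 * quota change file. Free slots are tracked in sd_quota_bitmap, one bit
 * per slot, scanned a byte at a time; only the first hold does the scan.
 */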
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_quota_spin);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&sdp->sd_quota_spin);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_quota_spin);
}
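
/**
 * bh_get - read in the quota change block that holds this ID's slot
 *
 * Maps the slot to a block and offset in the per-node quota change file,
 * reads the block, and points qd_bh_qc at this ID's change record. The
 * reference is counted, so the block is only read on the first hold.
 */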
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, 0, &bh_map);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
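
/**
 * qd_fish - find a quota_data whose changes need syncing
 *
 * Scans the quota list for an entry that is dirty (QDF_CHANGE), not
 * already being synced (QDF_LOCKED), and not yet synced in this
 * generation. A successful catch is moved to the list tail and returned
 * with extra qd, slot, and buffer references held.
 */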
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);
		qd->qd_count++;
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;

		found = 1;
		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&sdp->sd_quota_spin);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);
	qd->qd_count++;
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
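
/**
 * qdsb_get - get a quota_data along with its slot and buffer references
 *
 * Bundles qd_get(), slot_get() and bh_get(), unwinding in reverse order
 * on failure; qdsb_put() releases all three references.
 */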
static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, create, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
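
/**
 * gfs2_quota_hold - hold the quota_data structures an operation may change
 * @ip: The inode
 * @uid: The new owner, or NO_QUOTA_CHANGE
 * @gid: The new group, or NO_QUOTA_CHANGE
 *
 * Always holds the inode's current user and group IDs; uid/gid entries
 * are added only when an ownership change moves usage to different IDs,
 * so at most four quota_data structures are held.
 */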
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}
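
/**
 * do_qc - fold a change into the ID's entry in the quota change file
 *
 * The change record is written as part of the current transaction, which
 * is what makes a quota-check program after a crash unnecessary. When the
 * accumulated change returns to zero, the qd and slot references taken
 * for QDF_CHANGE are dropped; when a clean entry first becomes dirty,
 * those references are taken.
 */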
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);
	qd->qd_change = x;
	spin_unlock(&sdp->sd_quota_spin);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	__be64 *ptr;
	s64 value;
	int err = -EIO;

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + offset;
	value = (s64)be64_to_cpu(*ptr) + change;
	*ptr = cpu_to_be64(value);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	err = 0;
	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_value = cpu_to_be64(value);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic =
		cpu_to_be32(GFS2_MAGIC);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value =
		cpu_to_be64(value);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
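
/**
 * do_sync - sync a batch of quota_data changes to the quota file
 *
 * Takes an exclusive glock on every ID being synced plus the quota
 * inode, reserves space for any quota file blocks that must be
 * allocated, then folds each qd_change_sync into the quota file and
 * subtracts it back out of the quota change file via do_qc().
 */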
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
					   LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	if (nalloc) {
		al = gfs2_alloc_get(ip);

		al->al_requested = nalloc * (data_blocks + ind_blocks);

		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;

		error = gfs2_trans_begin(sdp,
					 al->al_rgd->rd_ri.ri_length +
					 num_qd * data_blocks +
					 nalloc * ind_blocks +
					 RES_DINODE + num_qd +
					 RES_STATFS, 0);
		if (error)
			goto out_ipres;
	} else {
		error = gfs2_trans_begin(sdp,
					 num_qd * data_blocks +
					 RES_DINODE + num_qd, 0);
		if (error)
			goto out_gunlock;
	}

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	if (nalloc)
		gfs2_inplace_release(ip);
out_alloc:
	if (nalloc)
		gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}
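
/**
 * do_glock - acquire an ID's quota glock and make sure its LVB is valid
 *
 * Normally a shared hold is enough, since the LVB caches the quota file
 * entry. If the LVB has never been filled in (or a refresh is forced),
 * the lock is retaken exclusively and the entry is read from the quota
 * file into the LVB before dropping back to a shared hold.
 */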
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota_host q;
	char buf[sizeof(struct gfs2_quota)];
	struct file_ra_state ra_state;
	int error;
	struct gfs2_quota_lvb *qlvb;

	file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);
restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		loff_t pos;
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,
					   q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		memset(buf, 0, sizeof(struct gfs2_quota));
		pos = qd2offset(qd);
		error = gfs2_internal_read(ip, &ra_state, buf,
					   &pos, sizeof(struct gfs2_quota));
		if (error < 0)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);

		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->__pad = 0;
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);
		qd->qd_qb = *qlvb;

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);
			force_refresh = 0;
			goto restart;
		}
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
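
/**
 * need_sync - decide whether a local change should be synced early
 *
 * The local change is scaled by the number of journals (an estimate of
 * how many nodes may hold similar unsynced changes) and by
 * quota_scale_num/quota_scale_den. If the globally known value plus that
 * scaled change would reach the limit, a sync is requested. For example,
 * with 4 journals and the default scale of 1, a local change of 100
 * blocks triggers a sync once the known value is within 400 blocks of
 * the limit.
 */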
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&sdp->sd_quota_spin);
	value = qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		do_div(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}
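
/**
 * gfs2_quota_check - check if an allocation would put an ID over quota
 *
 * The check uses the globally synced value from the LVB plus this node's
 * unsynced local change, so small overruns are possible (see the comment
 * at the top of this file). Crossing the warn value prints a rate-limited
 * warning; crossing the limit returns -EDQUOT.
 */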
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&sdp->sd_quota_spin);
		value += qd->qd_change;
		spin_unlock(&sdp->sd_quota_spin);

		if (be64_to_cpu(qd->qd_qb.qb_limit) &&
		    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period)
					 * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;
	unsigned int found = 0;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
			found++;
		}
	}
}
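
/**
 * gfs2_quota_sync - sync all dirty quota changes to the quota file
 *
 * Bumps the sync generation, then repeatedly fishes out up to
 * quota_simul_sync dirty entries and syncs them as a batch, until no
 * dirty entries from a previous generation remain.
 */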
int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, CREATE, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);

	return error;
}
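
/**
 * gfs2_quota_init - build in-core quota state at mount time
 *
 * Sizes the slot bitmap from the quota change file, then walks every
 * change record left over from before the last unmount (or crash) and
 * instantiates a quota_data for each non-zero entry so it can be synced.
 */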
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new,
						&dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;
			qd->qd_last_touched = jiffies;

			spin_lock(&sdp->sd_quota_spin);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&sdp->sd_quota_spin);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
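
/**
 * gfs2_quota_scan - reclaim quota_data structures that have gone unused
 *
 * Entries with no holders that have not been touched within
 * quota_cache_secs are moved to a private list under the spinlock and
 * then freed outside it.
 */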
void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data *qd, *safe;
	LIST_HEAD(dead);

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
		if (!qd->qd_count &&
		    time_after_eq(jiffies, qd->qd_last_touched +
				  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
			list_move(&qd->qd_list, &dead);
			gfs2_assert_warn(sdp,
					 atomic_read(&sdp->sd_quota_count) > 0);
			atomic_dec(&sdp->sd_quota_count);
		}
	}
	spin_unlock(&sdp->sd_quota_spin);

	while (!list_empty(&dead)) {
		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);
	}
}
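
/**
 * gfs2_quota_cleanup - tear down all quota state at unmount
 *
 * Waits (by rescheduling) for entries that still have extra holders,
 * then frees every quota_data and the slot bitmap.
 */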
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&sdp->sd_quota_spin);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (qd->qd_count > 1 ||
		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&sdp->sd_quota_spin);
			schedule();
			spin_lock(&sdp->sd_quota_spin);
			continue;
		}

		list_del(&qd->qd_list);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&sdp->sd_quota_spin);

		if (!qd->qd_count) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);

		spin_lock(&sdp->sd_quota_spin);
	}
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}