/* rgrp.c - GFS2 resource group (allocation bitmap) handling */
  1. /*
  2. * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  3. * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
  4. *
  5. * This copyrighted material is made available to anyone wishing to use,
  6. * modify, copy, or redistribute it subject to the terms and conditions
  7. * of the GNU General Public License v.2.
  8. */
  9. #include <linux/sched.h>
  10. #include <linux/slab.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/completion.h>
  13. #include <linux/buffer_head.h>
  14. #include <linux/fs.h>
  15. #include <asm/semaphore.h>
  16. #include "gfs2.h"
  17. #include "bits.h"
  18. #include "glock.h"
  19. #include "glops.h"
  20. #include "lops.h"
  21. #include "meta_io.h"
  22. #include "quota.h"
  23. #include "rgrp.h"
  24. #include "super.h"
  25. #include "trans.h"
  26. #include "ops_file.h"
  27. /**
  28. * gfs2_rgrp_verify - Verify that a resource group is consistent
  29. * @sdp: the filesystem
  30. * @rgd: the rgrp
  31. *
  32. */
  33. void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
  34. {
  35. struct gfs2_sbd *sdp = rgd->rd_sbd;
  36. struct gfs2_bitmap *bi = NULL;
  37. uint32_t length = rgd->rd_ri.ri_length;
  38. uint32_t count[4], tmp;
  39. int buf, x;
  40. memset(count, 0, 4 * sizeof(uint32_t));
  41. /* Count # blocks in each of 4 possible allocation states */
  42. for (buf = 0; buf < length; buf++) {
  43. bi = rgd->rd_bits + buf;
  44. for (x = 0; x < 4; x++)
  45. count[x] += gfs2_bitcount(rgd,
  46. bi->bi_bh->b_data +
  47. bi->bi_offset,
  48. bi->bi_len, x);
  49. }
  50. if (count[0] != rgd->rd_rg.rg_free) {
  51. if (gfs2_consist_rgrpd(rgd))
  52. fs_err(sdp, "free data mismatch: %u != %u\n",
  53. count[0], rgd->rd_rg.rg_free);
  54. return;
  55. }
  56. tmp = rgd->rd_ri.ri_data -
  57. rgd->rd_rg.rg_free -
  58. rgd->rd_rg.rg_dinodes;
  59. if (count[1] != tmp) {
  60. if (gfs2_consist_rgrpd(rgd))
  61. fs_err(sdp, "used data mismatch: %u != %u\n",
  62. count[1], tmp);
  63. return;
  64. }
  65. if (count[2]) {
  66. if (gfs2_consist_rgrpd(rgd))
  67. fs_err(sdp, "free metadata mismatch: %u != 0\n",
  68. count[2]);
  69. return;
  70. }
  71. if (count[3] != rgd->rd_rg.rg_dinodes) {
  72. if (gfs2_consist_rgrpd(rgd))
  73. fs_err(sdp, "used metadata mismatch: %u != %u\n",
  74. count[3], rgd->rd_rg.rg_dinodes);
  75. return;
  76. }
  77. }
  78. static inline int rgrp_contains_block(struct gfs2_rindex *ri, uint64_t block)
  79. {
  80. uint64_t first = ri->ri_data0;
  81. uint64_t last = first + ri->ri_data;
  82. return !!(first <= block && block < last);
  83. }
  84. /**
  85. * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
  86. * @sdp: The GFS2 superblock
  87. * @n: The data block number
  88. *
  89. * Returns: The resource group, or NULL if not found
  90. */
  91. struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, uint64_t blk)
  92. {
  93. struct gfs2_rgrpd *rgd;
  94. spin_lock(&sdp->sd_rindex_spin);
  95. list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
  96. if (rgrp_contains_block(&rgd->rd_ri, blk)) {
  97. list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
  98. spin_unlock(&sdp->sd_rindex_spin);
  99. return rgd;
  100. }
  101. }
  102. spin_unlock(&sdp->sd_rindex_spin);
  103. return NULL;
  104. }
  105. /**
  106. * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
  107. * @sdp: The GFS2 superblock
  108. *
  109. * Returns: The first rgrp in the filesystem
  110. */
  111. struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
  112. {
  113. gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
  114. return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
  115. }
  116. /**
  117. * gfs2_rgrpd_get_next - get the next RG
  118. * @rgd: A RG
  119. *
  120. * Returns: The next rgrp
  121. */
  122. struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
  123. {
  124. if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
  125. return NULL;
  126. return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
  127. }
  128. static void clear_rgrpdi(struct gfs2_sbd *sdp)
  129. {
  130. struct list_head *head;
  131. struct gfs2_rgrpd *rgd;
  132. struct gfs2_glock *gl;
  133. spin_lock(&sdp->sd_rindex_spin);
  134. sdp->sd_rindex_forward = NULL;
  135. head = &sdp->sd_rindex_recent_list;
  136. while (!list_empty(head)) {
  137. rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
  138. list_del(&rgd->rd_recent);
  139. }
  140. spin_unlock(&sdp->sd_rindex_spin);
  141. head = &sdp->sd_rindex_list;
  142. while (!list_empty(head)) {
  143. rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
  144. gl = rgd->rd_gl;
  145. list_del(&rgd->rd_list);
  146. list_del(&rgd->rd_list_mru);
  147. if (gl) {
  148. set_gl2rgd(gl, NULL);
  149. gfs2_glock_put(gl);
  150. }
  151. kfree(rgd->rd_bits);
  152. kfree(rgd);
  153. }
  154. }
  155. void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
  156. {
  157. mutex_lock(&sdp->sd_rindex_mutex);
  158. clear_rgrpdi(sdp);
  159. mutex_unlock(&sdp->sd_rindex_mutex);
  160. }
  161. /**
  162. * gfs2_compute_bitstructs - Compute the bitmap sizes
  163. * @rgd: The resource group descriptor
  164. *
  165. * Calculates bitmap descriptors, one for each block that contains bitmap data
  166. *
  167. * Returns: errno
  168. */
  169. static int compute_bitstructs(struct gfs2_rgrpd *rgd)
  170. {
  171. struct gfs2_sbd *sdp = rgd->rd_sbd;
  172. struct gfs2_bitmap *bi;
  173. uint32_t length = rgd->rd_ri.ri_length; /* # blocks in hdr & bitmap */
  174. uint32_t bytes_left, bytes;
  175. int x;
  176. rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_KERNEL);
  177. if (!rgd->rd_bits)
  178. return -ENOMEM;
  179. bytes_left = rgd->rd_ri.ri_bitbytes;
  180. for (x = 0; x < length; x++) {
  181. bi = rgd->rd_bits + x;
  182. /* small rgrp; bitmap stored completely in header block */
  183. if (length == 1) {
  184. bytes = bytes_left;
  185. bi->bi_offset = sizeof(struct gfs2_rgrp);
  186. bi->bi_start = 0;
  187. bi->bi_len = bytes;
  188. /* header block */
  189. } else if (x == 0) {
  190. bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
  191. bi->bi_offset = sizeof(struct gfs2_rgrp);
  192. bi->bi_start = 0;
  193. bi->bi_len = bytes;
  194. /* last block */
  195. } else if (x + 1 == length) {
  196. bytes = bytes_left;
  197. bi->bi_offset = sizeof(struct gfs2_meta_header);
  198. bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
  199. bi->bi_len = bytes;
  200. /* other blocks */
  201. } else {
  202. bytes = sdp->sd_sb.sb_bsize -
  203. sizeof(struct gfs2_meta_header);
  204. bi->bi_offset = sizeof(struct gfs2_meta_header);
  205. bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
  206. bi->bi_len = bytes;
  207. }
  208. bytes_left -= bytes;
  209. }
  210. if (bytes_left) {
  211. gfs2_consist_rgrpd(rgd);
  212. return -EIO;
  213. }
  214. bi = rgd->rd_bits + (length - 1);
  215. if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_ri.ri_data) {
  216. if (gfs2_consist_rgrpd(rgd)) {
  217. gfs2_rindex_print(&rgd->rd_ri);
  218. fs_err(sdp, "start=%u len=%u offset=%u\n",
  219. bi->bi_start, bi->bi_len, bi->bi_offset);
  220. }
  221. return -EIO;
  222. }
  223. return 0;
  224. }
  225. /**
  226. * gfs2_ri_update - Pull in a new resource index from the disk
  227. * @gl: The glock covering the rindex inode
  228. *
  229. * Returns: 0 on successful update, error code otherwise
  230. */
  231. static int gfs2_ri_update(struct gfs2_inode *ip)
  232. {
  233. struct gfs2_sbd *sdp = ip->i_sbd;
  234. struct inode *inode = ip->i_vnode;
  235. struct gfs2_rgrpd *rgd;
  236. char buf[sizeof(struct gfs2_rindex)];
  237. struct file_ra_state ra_state;
  238. uint64_t junk = ip->i_di.di_size;
  239. int error;
  240. if (do_div(junk, sizeof(struct gfs2_rindex))) {
  241. gfs2_consist_inode(ip);
  242. return -EIO;
  243. }
  244. clear_rgrpdi(sdp);
  245. file_ra_state_init(&ra_state, inode->i_mapping);
  246. for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
  247. loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
  248. error = gfs2_internal_read(ip, &ra_state, buf, &pos,
  249. sizeof(struct gfs2_rindex));
  250. if (!error)
  251. break;
  252. if (error != sizeof(struct gfs2_rindex)) {
  253. if (error > 0)
  254. error = -EIO;
  255. goto fail;
  256. }
  257. rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_KERNEL);
  258. error = -ENOMEM;
  259. if (!rgd)
  260. goto fail;
  261. mutex_init(&rgd->rd_mutex);
  262. lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
  263. rgd->rd_sbd = sdp;
  264. list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
  265. list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
  266. gfs2_rindex_in(&rgd->rd_ri, buf);
  267. error = compute_bitstructs(rgd);
  268. if (error)
  269. goto fail;
  270. error = gfs2_glock_get(sdp, rgd->rd_ri.ri_addr,
  271. &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
  272. if (error)
  273. goto fail;
  274. set_gl2rgd(rgd->rd_gl, rgd);
  275. rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
  276. }
  277. sdp->sd_rindex_vn = ip->i_gl->gl_vn;
  278. return 0;
  279. fail:
  280. clear_rgrpdi(sdp);
  281. return error;
  282. }
  283. /**
  284. * gfs2_rindex_hold - Grab a lock on the rindex
  285. * @sdp: The GFS2 superblock
  286. * @ri_gh: the glock holder
  287. *
  288. * We grab a lock on the rindex inode to make sure that it doesn't
  289. * change whilst we are performing an operation. We keep this lock
  290. * for quite long periods of time compared to other locks. This
  291. * doesn't matter, since it is shared and it is very, very rarely
  292. * accessed in the exclusive mode (i.e. only when expanding the filesystem).
  293. *
  294. * This makes sure that we're using the latest copy of the resource index
  295. * special file, which might have been updated if someone expanded the
  296. * filesystem (via gfs2_grow utility), which adds new resource groups.
  297. *
  298. * Returns: 0 on success, error code otherwise
  299. */
  300. int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
  301. {
  302. struct gfs2_inode *ip = get_v2ip(sdp->sd_rindex);
  303. struct gfs2_glock *gl = ip->i_gl;
  304. int error;
  305. error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
  306. if (error)
  307. return error;
  308. /* Read new copy from disk if we don't have the latest */
  309. if (sdp->sd_rindex_vn != gl->gl_vn) {
  310. mutex_lock(&sdp->sd_rindex_mutex);
  311. if (sdp->sd_rindex_vn != gl->gl_vn) {
  312. error = gfs2_ri_update(ip);
  313. if (error)
  314. gfs2_glock_dq_uninit(ri_gh);
  315. }
  316. mutex_unlock(&sdp->sd_rindex_mutex);
  317. }
  318. return error;
  319. }
  320. /**
  321. * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
  322. * @rgd: the struct gfs2_rgrpd describing the RG to read in
  323. *
  324. * Read in all of a Resource Group's header and bitmap blocks.
  325. * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
  326. *
  327. * Returns: errno
  328. */
  329. int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
  330. {
  331. struct gfs2_sbd *sdp = rgd->rd_sbd;
  332. struct gfs2_glock *gl = rgd->rd_gl;
  333. unsigned int length = rgd->rd_ri.ri_length;
  334. struct gfs2_bitmap *bi;
  335. unsigned int x, y;
  336. int error;
  337. mutex_lock(&rgd->rd_mutex);
  338. spin_lock(&sdp->sd_rindex_spin);
  339. if (rgd->rd_bh_count) {
  340. rgd->rd_bh_count++;
  341. spin_unlock(&sdp->sd_rindex_spin);
  342. mutex_unlock(&rgd->rd_mutex);
  343. return 0;
  344. }
  345. spin_unlock(&sdp->sd_rindex_spin);
  346. for (x = 0; x < length; x++) {
  347. bi = rgd->rd_bits + x;
  348. error = gfs2_meta_read(gl, rgd->rd_ri.ri_addr + x, DIO_START,
  349. &bi->bi_bh);
  350. if (error)
  351. goto fail;
  352. }
  353. for (y = length; y--;) {
  354. bi = rgd->rd_bits + y;
  355. error = gfs2_meta_reread(sdp, bi->bi_bh, DIO_WAIT);
  356. if (error)
  357. goto fail;
  358. if (gfs2_metatype_check(sdp, bi->bi_bh,
  359. (y) ? GFS2_METATYPE_RB :
  360. GFS2_METATYPE_RG)) {
  361. error = -EIO;
  362. goto fail;
  363. }
  364. }
  365. if (rgd->rd_rg_vn != gl->gl_vn) {
  366. gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data);
  367. rgd->rd_rg_vn = gl->gl_vn;
  368. }
  369. spin_lock(&sdp->sd_rindex_spin);
  370. rgd->rd_free_clone = rgd->rd_rg.rg_free;
  371. rgd->rd_bh_count++;
  372. spin_unlock(&sdp->sd_rindex_spin);
  373. mutex_unlock(&rgd->rd_mutex);
  374. return 0;
  375. fail:
  376. while (x--) {
  377. bi = rgd->rd_bits + x;
  378. brelse(bi->bi_bh);
  379. bi->bi_bh = NULL;
  380. gfs2_assert_warn(sdp, !bi->bi_clone);
  381. }
  382. mutex_unlock(&rgd->rd_mutex);
  383. return error;
  384. }
  385. void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
  386. {
  387. struct gfs2_sbd *sdp = rgd->rd_sbd;
  388. spin_lock(&sdp->sd_rindex_spin);
  389. gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
  390. rgd->rd_bh_count++;
  391. spin_unlock(&sdp->sd_rindex_spin);
  392. }
  393. /**
  394. * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
  395. * @rgd: the struct gfs2_rgrpd describing the RG to read in
  396. *
  397. */
  398. void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
  399. {
  400. struct gfs2_sbd *sdp = rgd->rd_sbd;
  401. int x, length = rgd->rd_ri.ri_length;
  402. spin_lock(&sdp->sd_rindex_spin);
  403. gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
  404. if (--rgd->rd_bh_count) {
  405. spin_unlock(&sdp->sd_rindex_spin);
  406. return;
  407. }
  408. for (x = 0; x < length; x++) {
  409. struct gfs2_bitmap *bi = rgd->rd_bits + x;
  410. kfree(bi->bi_clone);
  411. bi->bi_clone = NULL;
  412. brelse(bi->bi_bh);
  413. bi->bi_bh = NULL;
  414. }
  415. spin_unlock(&sdp->sd_rindex_spin);
  416. }
  417. void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
  418. {
  419. struct gfs2_sbd *sdp = rgd->rd_sbd;
  420. unsigned int length = rgd->rd_ri.ri_length;
  421. unsigned int x;
  422. for (x = 0; x < length; x++) {
  423. struct gfs2_bitmap *bi = rgd->rd_bits + x;
  424. if (!bi->bi_clone)
  425. continue;
  426. memcpy(bi->bi_clone + bi->bi_offset,
  427. bi->bi_bh->b_data + bi->bi_offset,
  428. bi->bi_len);
  429. }
  430. spin_lock(&sdp->sd_rindex_spin);
  431. rgd->rd_free_clone = rgd->rd_rg.rg_free;
  432. spin_unlock(&sdp->sd_rindex_spin);
  433. }
  434. /**
  435. * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
  436. * @ip: the incore GFS2 inode structure
  437. *
  438. * Returns: the struct gfs2_alloc
  439. */
  440. struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
  441. {
  442. struct gfs2_alloc *al = &ip->i_alloc;
  443. /* FIXME: Should assert that the correct locks are held here... */
  444. memset(al, 0, sizeof(*al));
  445. return al;
  446. }
  447. /**
  448. * gfs2_alloc_put - throw away the struct gfs2_alloc for an inode
  449. * @ip: the inode
  450. *
  451. */
  452. void gfs2_alloc_put(struct gfs2_inode *ip)
  453. {
  454. return;
  455. }
  456. /**
  457. * try_rgrp_fit - See if a given reservation will fit in a given RG
  458. * @rgd: the RG data
  459. * @al: the struct gfs2_alloc structure describing the reservation
  460. *
  461. * If there's room for the requested blocks to be allocated from the RG:
  462. * Sets the $al_reserved_data field in @al.
  463. * Sets the $al_reserved_meta field in @al.
  464. * Sets the $al_rgd field in @al.
  465. *
  466. * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
  467. */
  468. static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
  469. {
  470. struct gfs2_sbd *sdp = rgd->rd_sbd;
  471. int ret = 0;
  472. spin_lock(&sdp->sd_rindex_spin);
  473. if (rgd->rd_free_clone >= al->al_requested) {
  474. al->al_rgd = rgd;
  475. ret = 1;
  476. }
  477. spin_unlock(&sdp->sd_rindex_spin);
  478. return ret;
  479. }
  480. /**
  481. * recent_rgrp_first - get first RG from "recent" list
  482. * @sdp: The GFS2 superblock
  483. * @rglast: address of the rgrp used last
  484. *
  485. * Returns: The first rgrp in the recent list
  486. */
  487. static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
  488. uint64_t rglast)
  489. {
  490. struct gfs2_rgrpd *rgd = NULL;
  491. spin_lock(&sdp->sd_rindex_spin);
  492. if (list_empty(&sdp->sd_rindex_recent_list))
  493. goto out;
  494. if (!rglast)
  495. goto first;
  496. list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
  497. if (rgd->rd_ri.ri_addr == rglast)
  498. goto out;
  499. }
  500. first:
  501. rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd,
  502. rd_recent);
  503. out:
  504. spin_unlock(&sdp->sd_rindex_spin);
  505. return rgd;
  506. }
  507. /**
  508. * recent_rgrp_next - get next RG from "recent" list
  509. * @cur_rgd: current rgrp
  510. * @remove:
  511. *
  512. * Returns: The next rgrp in the recent list
  513. */
  514. static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
  515. int remove)
  516. {
  517. struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
  518. struct list_head *head;
  519. struct gfs2_rgrpd *rgd;
  520. spin_lock(&sdp->sd_rindex_spin);
  521. head = &sdp->sd_rindex_recent_list;
  522. list_for_each_entry(rgd, head, rd_recent) {
  523. if (rgd == cur_rgd) {
  524. if (cur_rgd->rd_recent.next != head)
  525. rgd = list_entry(cur_rgd->rd_recent.next,
  526. struct gfs2_rgrpd, rd_recent);
  527. else
  528. rgd = NULL;
  529. if (remove)
  530. list_del(&cur_rgd->rd_recent);
  531. goto out;
  532. }
  533. }
  534. rgd = NULL;
  535. if (!list_empty(head))
  536. rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
  537. out:
  538. spin_unlock(&sdp->sd_rindex_spin);
  539. return rgd;
  540. }
  541. /**
  542. * recent_rgrp_add - add an RG to tail of "recent" list
  543. * @new_rgd: The rgrp to add
  544. *
  545. */
  546. static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
  547. {
  548. struct gfs2_sbd *sdp = new_rgd->rd_sbd;
  549. struct gfs2_rgrpd *rgd;
  550. unsigned int count = 0;
  551. unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);
  552. spin_lock(&sdp->sd_rindex_spin);
  553. list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
  554. if (rgd == new_rgd)
  555. goto out;
  556. if (++count >= max)
  557. goto out;
  558. }
  559. list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);
  560. out:
  561. spin_unlock(&sdp->sd_rindex_spin);
  562. }
  563. /**
  564. * forward_rgrp_get - get an rgrp to try next from full list
  565. * @sdp: The GFS2 superblock
  566. *
  567. * Returns: The rgrp to try next
  568. */
  569. static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
  570. {
  571. struct gfs2_rgrpd *rgd;
  572. unsigned int journals = gfs2_jindex_size(sdp);
  573. unsigned int rg = 0, x;
  574. spin_lock(&sdp->sd_rindex_spin);
  575. rgd = sdp->sd_rindex_forward;
  576. if (!rgd) {
  577. if (sdp->sd_rgrps >= journals)
  578. rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;
  579. for (x = 0, rgd = gfs2_rgrpd_get_first(sdp);
  580. x < rg;
  581. x++, rgd = gfs2_rgrpd_get_next(rgd))
  582. /* Do Nothing */;
  583. sdp->sd_rindex_forward = rgd;
  584. }
  585. spin_unlock(&sdp->sd_rindex_spin);
  586. return rgd;
  587. }
  588. /**
  589. * forward_rgrp_set - set the forward rgrp pointer
  590. * @sdp: the filesystem
  591. * @rgd: The new forward rgrp
  592. *
  593. */
  594. static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
  595. {
  596. spin_lock(&sdp->sd_rindex_spin);
  597. sdp->sd_rindex_forward = rgd;
  598. spin_unlock(&sdp->sd_rindex_spin);
  599. }
  600. /**
  601. * get_local_rgrp - Choose and lock a rgrp for allocation
  602. * @ip: the inode to reserve space for
  603. * @rgp: the chosen and locked rgrp
  604. *
  605. * Try to acquire rgrp in way which avoids contending with others.
  606. *
  607. * Returns: errno
  608. */
  609. static int get_local_rgrp(struct gfs2_inode *ip)
  610. {
  611. struct gfs2_sbd *sdp = ip->i_sbd;
  612. struct gfs2_rgrpd *rgd, *begin = NULL;
  613. struct gfs2_alloc *al = &ip->i_alloc;
  614. int flags = LM_FLAG_TRY;
  615. int skipped = 0;
  616. int loops = 0;
  617. int error;
  618. /* Try recently successful rgrps */
  619. rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);
  620. while (rgd) {
  621. error = gfs2_glock_nq_init(rgd->rd_gl,
  622. LM_ST_EXCLUSIVE, LM_FLAG_TRY,
  623. &al->al_rgd_gh);
  624. switch (error) {
  625. case 0:
  626. if (try_rgrp_fit(rgd, al))
  627. goto out;
  628. gfs2_glock_dq_uninit(&al->al_rgd_gh);
  629. rgd = recent_rgrp_next(rgd, 1);
  630. break;
  631. case GLR_TRYFAILED:
  632. rgd = recent_rgrp_next(rgd, 0);
  633. break;
  634. default:
  635. return error;
  636. }
  637. }
  638. /* Go through full list of rgrps */
  639. begin = rgd = forward_rgrp_get(sdp);
  640. for (;;) {
  641. error = gfs2_glock_nq_init(rgd->rd_gl,
  642. LM_ST_EXCLUSIVE, flags,
  643. &al->al_rgd_gh);
  644. switch (error) {
  645. case 0:
  646. if (try_rgrp_fit(rgd, al))
  647. goto out;
  648. gfs2_glock_dq_uninit(&al->al_rgd_gh);
  649. break;
  650. case GLR_TRYFAILED:
  651. skipped++;
  652. break;
  653. default:
  654. return error;
  655. }
  656. rgd = gfs2_rgrpd_get_next(rgd);
  657. if (!rgd)
  658. rgd = gfs2_rgrpd_get_first(sdp);
  659. if (rgd == begin) {
  660. if (++loops >= 2 || !skipped)
  661. return -ENOSPC;
  662. flags = 0;
  663. }
  664. }
  665. out:
  666. ip->i_last_rg_alloc = rgd->rd_ri.ri_addr;
  667. if (begin) {
  668. recent_rgrp_add(rgd);
  669. rgd = gfs2_rgrpd_get_next(rgd);
  670. if (!rgd)
  671. rgd = gfs2_rgrpd_get_first(sdp);
  672. forward_rgrp_set(sdp, rgd);
  673. }
  674. return 0;
  675. }
  676. /**
  677. * gfs2_inplace_reserve_i - Reserve space in the filesystem
  678. * @ip: the inode to reserve space for
  679. *
  680. * Returns: errno
  681. */
  682. int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
  683. {
  684. struct gfs2_sbd *sdp = ip->i_sbd;
  685. struct gfs2_alloc *al = &ip->i_alloc;
  686. int error;
  687. if (gfs2_assert_warn(sdp, al->al_requested))
  688. return -EINVAL;
  689. error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
  690. if (error)
  691. return error;
  692. error = get_local_rgrp(ip);
  693. if (error) {
  694. gfs2_glock_dq_uninit(&al->al_ri_gh);
  695. return error;
  696. }
  697. al->al_file = file;
  698. al->al_line = line;
  699. return 0;
  700. }
  701. /**
  702. * gfs2_inplace_release - release an inplace reservation
  703. * @ip: the inode the reservation was taken out on
  704. *
  705. * Release a reservation made by gfs2_inplace_reserve().
  706. */
  707. void gfs2_inplace_release(struct gfs2_inode *ip)
  708. {
  709. struct gfs2_sbd *sdp = ip->i_sbd;
  710. struct gfs2_alloc *al = &ip->i_alloc;
  711. if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
  712. fs_warn(sdp, "al_alloced = %u, al_requested = %u "
  713. "al_file = %s, al_line = %u\n",
  714. al->al_alloced, al->al_requested, al->al_file,
  715. al->al_line);
  716. al->al_rgd = NULL;
  717. gfs2_glock_dq_uninit(&al->al_rgd_gh);
  718. gfs2_glock_dq_uninit(&al->al_ri_gh);
  719. }
  720. /**
  721. * gfs2_get_block_type - Check a block in a RG is of given type
  722. * @rgd: the resource group holding the block
  723. * @block: the block number
  724. *
  725. * Returns: The block type (GFS2_BLKST_*)
  726. */
  727. unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, uint64_t block)
  728. {
  729. struct gfs2_bitmap *bi = NULL;
  730. uint32_t length, rgrp_block, buf_block;
  731. unsigned int buf;
  732. unsigned char type;
  733. length = rgd->rd_ri.ri_length;
  734. rgrp_block = block - rgd->rd_ri.ri_data0;
  735. for (buf = 0; buf < length; buf++) {
  736. bi = rgd->rd_bits + buf;
  737. if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
  738. break;
  739. }
  740. gfs2_assert(rgd->rd_sbd, buf < length);
  741. buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;
  742. type = gfs2_testbit(rgd,
  743. bi->bi_bh->b_data + bi->bi_offset,
  744. bi->bi_len, buf_block);
  745. return type;
  746. }
  747. /**
  748. * rgblk_search - find a block in @old_state, change allocation
  749. * state to @new_state
  750. * @rgd: the resource group descriptor
  751. * @goal: the goal block within the RG (start here to search for avail block)
  752. * @old_state: GFS2_BLKST_XXX the before-allocation state to find
  753. * @new_state: GFS2_BLKST_XXX the after-allocation block state
  754. *
  755. * Walk rgrp's bitmap to find bits that represent a block in @old_state.
  756. * Add the found bitmap buffer to the transaction.
  757. * Set the found bits to @new_state to change block's allocation state.
  758. *
  759. * This function never fails, because we wouldn't call it unless we
  760. * know (from reservation results, etc.) that a block is available.
  761. *
  762. * Scope of @goal and returned block is just within rgrp, not the whole
  763. * filesystem.
  764. *
  765. * Returns: the block number allocated
  766. */
  767. static uint32_t rgblk_search(struct gfs2_rgrpd *rgd, uint32_t goal,
  768. unsigned char old_state, unsigned char new_state)
  769. {
  770. struct gfs2_bitmap *bi = NULL;
  771. uint32_t length = rgd->rd_ri.ri_length;
  772. uint32_t blk = 0;
  773. unsigned int buf, x;
  774. /* Find bitmap block that contains bits for goal block */
  775. for (buf = 0; buf < length; buf++) {
  776. bi = rgd->rd_bits + buf;
  777. if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
  778. break;
  779. }
  780. gfs2_assert(rgd->rd_sbd, buf < length);
  781. /* Convert scope of "goal" from rgrp-wide to within found bit block */
  782. goal -= bi->bi_start * GFS2_NBBY;
  783. /* Search (up to entire) bitmap in this rgrp for allocatable block.
  784. "x <= length", instead of "x < length", because we typically start
  785. the search in the middle of a bit block, but if we can't find an
  786. allocatable block anywhere else, we want to be able wrap around and
  787. search in the first part of our first-searched bit block. */
  788. for (x = 0; x <= length; x++) {
  789. if (bi->bi_clone)
  790. blk = gfs2_bitfit(rgd,
  791. bi->bi_clone + bi->bi_offset,
  792. bi->bi_len, goal, old_state);
  793. else
  794. blk = gfs2_bitfit(rgd,
  795. bi->bi_bh->b_data + bi->bi_offset,
  796. bi->bi_len, goal, old_state);
  797. if (blk != BFITNOENT)
  798. break;
  799. /* Try next bitmap block (wrap back to rgrp header if at end) */
  800. buf = (buf + 1) % length;
  801. bi = rgd->rd_bits + buf;
  802. goal = 0;
  803. }
  804. if (gfs2_assert_withdraw(rgd->rd_sbd, x <= length))
  805. blk = 0;
  806. gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
  807. gfs2_setbit(rgd,
  808. bi->bi_bh->b_data + bi->bi_offset,
  809. bi->bi_len, blk, new_state);
  810. if (bi->bi_clone)
  811. gfs2_setbit(rgd,
  812. bi->bi_clone + bi->bi_offset,
  813. bi->bi_len, blk, new_state);
  814. return bi->bi_start * GFS2_NBBY + blk;
  815. }
  816. /**
  817. * rgblk_free - Change alloc state of given block(s)
  818. * @sdp: the filesystem
  819. * @bstart: the start of a run of blocks to free
  820. * @blen: the length of the block run (all must lie within ONE RG!)
  821. * @new_state: GFS2_BLKST_XXX the after-allocation block state
  822. *
  823. * Returns: Resource group containing the block(s)
  824. */
  825. static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, uint64_t bstart,
  826. uint32_t blen, unsigned char new_state)
  827. {
  828. struct gfs2_rgrpd *rgd;
  829. struct gfs2_bitmap *bi = NULL;
  830. uint32_t length, rgrp_blk, buf_blk;
  831. unsigned int buf;
  832. rgd = gfs2_blk2rgrpd(sdp, bstart);
  833. if (!rgd) {
  834. if (gfs2_consist(sdp))
  835. fs_err(sdp, "block = %llu\n", bstart);
  836. return NULL;
  837. }
  838. length = rgd->rd_ri.ri_length;
  839. rgrp_blk = bstart - rgd->rd_ri.ri_data0;
  840. while (blen--) {
  841. for (buf = 0; buf < length; buf++) {
  842. bi = rgd->rd_bits + buf;
  843. if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
  844. break;
  845. }
  846. gfs2_assert(rgd->rd_sbd, buf < length);
  847. buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
  848. rgrp_blk++;
  849. if (!bi->bi_clone) {
  850. bi->bi_clone = kmalloc(bi->bi_bh->b_size,
  851. GFP_KERNEL | __GFP_NOFAIL);
  852. memcpy(bi->bi_clone + bi->bi_offset,
  853. bi->bi_bh->b_data + bi->bi_offset,
  854. bi->bi_len);
  855. }
  856. gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
  857. gfs2_setbit(rgd,
  858. bi->bi_bh->b_data + bi->bi_offset,
  859. bi->bi_len, buf_blk, new_state);
  860. }
  861. return rgd;
  862. }
/**
 * gfs2_alloc_data - Allocate a data block
 * @ip: the inode to allocate the data block for
 *
 * Caller must already hold the rgrp glock and have reserved space
 * (ip->i_alloc.al_rgd is the target rgrp).
 *
 * Returns: the allocated block's filesystem-wide block number
 */

uint64_t gfs2_alloc_data(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	uint32_t goal, blk;
	uint64_t block;

	/* Start the bitmap search at the inode's data goal if it lies in
	   this rgrp; otherwise continue from the rgrp's last data alloc */
	if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_data))
		goal = ip->i_di.di_goal_data - rgd->rd_ri.ri_data0;
	else
		goal = rgd->rd_last_alloc_data;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_data = blk;

	/* Convert the rgrp-relative bit offset to an absolute block number
	   and remember it as the next allocation goal for this inode */
	block = rgd->rd_ri.ri_data0 + blk;
	ip->i_di.di_goal_data = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	/* Journal the updated rgrp header */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);

	/* rd_free_clone is protected by sd_rindex_spin */
	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
/**
 * gfs2_alloc_meta - Allocate a metadata block
 * @ip: the inode to allocate the metadata block for
 *
 * Same flow as gfs2_alloc_data(), but tracks the metadata goal
 * (di_goal_meta / rd_last_alloc_meta) and cancels any pending revoke
 * on the reused block.
 *
 * Returns: the allocated block's filesystem-wide block number
 */

uint64_t gfs2_alloc_meta(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	uint32_t goal, blk;
	uint64_t block;

	/* Start the bitmap search at the inode's metadata goal if it lies
	   in this rgrp; otherwise continue from the rgrp's last meta alloc */
	if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_meta))
		goal = ip->i_di.di_goal_meta - rgd->rd_ri.ri_data0;
	else
		goal = rgd->rd_last_alloc_meta;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_meta = blk;

	/* Convert rgrp-relative bit offset to an absolute block number */
	block = rgd->rd_ri.ri_data0 + blk;
	ip->i_di.di_goal_meta = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	/* Journal the updated rgrp header */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);

	/* NOTE(review): presumably drops a stale revoke left by a prior
	   use of this block — confirm against gfs2_trans_add_unrevoke() */
	gfs2_trans_add_unrevoke(sdp, block);

	/* rd_free_clone is protected by sd_rindex_spin */
	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
/**
 * gfs2_alloc_di - Allocate a dinode
 * @dip: the directory that the inode is going in
 *
 * Marks the block GFS2_BLKST_DINODE (not USED) and bumps the rgrp's
 * dinode count as well as decrementing its free count.  The search
 * goal is the rgrp's last metadata allocation point.
 *
 * Returns: the block allocated
 */

uint64_t gfs2_alloc_di(struct gfs2_inode *dip)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_alloc *al = &dip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	uint32_t blk;
	uint64_t block;

	blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE);
	rgd->rd_last_alloc_meta = blk;

	/* Convert rgrp-relative bit offset to an absolute block number */
	block = rgd->rd_ri.ri_data0 + blk;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;
	rgd->rd_rg.rg_dinodes++;

	/* Journal the updated rgrp header */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	/* statfs: one fewer free block, one more dinode */
	gfs2_statfs_change(sdp, 0, -1, +1);

	/* NOTE(review): presumably drops a stale revoke left by a prior
	   use of this block — confirm against gfs2_trans_add_unrevoke() */
	gfs2_trans_add_unrevoke(sdp, block);

	/* rd_free_clone is protected by sd_rindex_spin */
	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
/**
 * gfs2_free_data - free a contiguous run of data block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run (all must lie within ONE RG)
 *
 * Marks the blocks free in the bitmap, journals the rgrp header, and
 * adjusts statfs and quota accordingly.  Silently returns if no rgrp
 * covers @bstart (rgblk_free already reported the inconsistency).
 */

void gfs2_free_data(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	/* Journal the updated rgrp header */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(int64_t)blen,
			  ip->i_di.di_uid, ip->i_di.di_gid);
}
/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run (all must lie within ONE RG)
 *
 * Same as gfs2_free_data(), but additionally wipes the freed blocks'
 * cached metadata buffers via gfs2_meta_wipe().
 */

void gfs2_free_meta(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	/* Journal the updated rgrp header */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(int64_t)blen,
			  ip->i_di.di_uid, ip->i_di.di_gid);

	/* Discard any cached buffers for the now-freed metadata */
	gfs2_meta_wipe(ip, bstart, blen);
}
/**
 * gfs2_free_uninit_di - free a dinode block without quota/cache cleanup
 * @rgd: the resource group that contains the dinode
 * @blkno: the dinode's block number
 *
 * Frees the block, moves the rgrp's counts from dinodes to free, and
 * journals the change.  Unlike gfs2_free_di(), this does not adjust
 * quota or wipe cached metadata — NOTE(review): presumably intended
 * for dinodes whose contents were never initialized; confirm callers.
 */

void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, uint64_t blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;

	/* The block must belong to the rgrp the caller handed us */
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_rg.rg_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_rg.rg_dinodes--;
	rgd->rd_rg.rg_free++;

	/* Journal the updated rgrp header */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	/* statfs: one more free block, one fewer dinode */
	gfs2_statfs_change(sdp, 0, +1, -1);
	gfs2_trans_add_rg(rgd);
}
/**
 * gfs2_free_di - free a dinode block
 * @rgd: the resource group that contains the dinode
 * @ip: the inode
 *
 * Full dinode free: releases the block via gfs2_free_uninit_di(),
 * then charges quota back and wipes the cached metadata buffer.
 * (Header previously mis-titled "gfs2_free_uninit_di".)
 */

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
	gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid);
	gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
}
  1036. /**
  1037. * gfs2_rlist_add - add a RG to a list of RGs
  1038. * @sdp: the filesystem
  1039. * @rlist: the list of resource groups
  1040. * @block: the block
  1041. *
  1042. * Figure out what RG a block belongs to and add that RG to the list
  1043. *
  1044. * FIXME: Don't use NOFAIL
  1045. *
  1046. */
  1047. void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
  1048. uint64_t block)
  1049. {
  1050. struct gfs2_rgrpd *rgd;
  1051. struct gfs2_rgrpd **tmp;
  1052. unsigned int new_space;
  1053. unsigned int x;
  1054. if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
  1055. return;
  1056. rgd = gfs2_blk2rgrpd(sdp, block);
  1057. if (!rgd) {
  1058. if (gfs2_consist(sdp))
  1059. fs_err(sdp, "block = %llu\n", block);
  1060. return;
  1061. }
  1062. for (x = 0; x < rlist->rl_rgrps; x++)
  1063. if (rlist->rl_rgd[x] == rgd)
  1064. return;
  1065. if (rlist->rl_rgrps == rlist->rl_space) {
  1066. new_space = rlist->rl_space + 10;
  1067. tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
  1068. GFP_KERNEL | __GFP_NOFAIL);
  1069. if (rlist->rl_rgd) {
  1070. memcpy(tmp, rlist->rl_rgd,
  1071. rlist->rl_space * sizeof(struct gfs2_rgrpd *));
  1072. kfree(rlist->rl_rgd);
  1073. }
  1074. rlist->rl_space = new_space;
  1075. rlist->rl_rgd = tmp;
  1076. }
  1077. rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
  1078. }
  1079. /**
  1080. * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
  1081. * and initialize an array of glock holders for them
  1082. * @rlist: the list of resource groups
  1083. * @state: the lock state to acquire the RG lock in
  1084. * @flags: the modifier flags for the holder structures
  1085. *
  1086. * FIXME: Don't use NOFAIL
  1087. *
  1088. */
  1089. void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
  1090. int flags)
  1091. {
  1092. unsigned int x;
  1093. rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
  1094. GFP_KERNEL | __GFP_NOFAIL);
  1095. for (x = 0; x < rlist->rl_rgrps; x++)
  1096. gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
  1097. state, flags,
  1098. &rlist->rl_ghs[x]);
  1099. }
  1100. /**
  1101. * gfs2_rlist_free - free a resource group list
  1102. * @list: the list of resource groups
  1103. *
  1104. */
  1105. void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
  1106. {
  1107. unsigned int x;
  1108. kfree(rlist->rl_rgd);
  1109. if (rlist->rl_ghs) {
  1110. for (x = 0; x < rlist->rl_rgrps; x++)
  1111. gfs2_holder_uninit(&rlist->rl_ghs[x]);
  1112. kfree(rlist->rl_ghs);
  1113. }
  1114. }