/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "ops_file.h"
#include "util.h"

#define BFITNOENT 0xFFFFFFFF

/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. One bit indicates whether or not the block is used. (1=used,
 * 0=free) The other bit indicates whether or not the block contains a
 * dinode or not. (1=dinode, 0=not-dinode) So, each byte represents
 * GFS2_NBBY (i.e. 4) blocks.
 */

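/*
 * Worked example (illustrative only, assuming the usual GFS2_BLKST_*
 * encoding of 00=free, 01=used data, 10=free metadata, 11=dinode):
 * the byte 0xE4 (binary 11 10 01 00) describes four blocks.  Reading
 * from the least significant bit-pair upwards, block 0 is free,
 * block 1 is used data, block 2 is free metadata, and block 3 is a
 * dinode.
 */
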
static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 0, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 0,
	        1, 0, 0, 0
};

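/*
 * Note on the table above: valid_change is indexed as
 * valid_change[new_state * 4 + cur_state], i.e. each row is a requested
 * new state ("new" is spelled down the left edge) and each column is a
 * current state.  For example, changing a used block (cur_state 1) to
 * free (new_state 0) looks up valid_change[0 * 4 + 1] == 1 and is
 * allowed, while "freeing" an already free block looks up
 * valid_change[0] == 0 and makes gfs2_setbit() call gfs2_consist_rgrpd().
 */
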
/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @block: the block to set
 * @new_state: the new state of the block
 *
 */

void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
		 unsigned int buflen, uint32_t block, unsigned char new_state)
{
	unsigned char *byte, *end, cur_state;
	unsigned int bit;

	byte = buffer + (block / GFS2_NBBY);
	bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;

	gfs2_assert(rgd->rd_sbd, byte < end);

	cur_state = (*byte >> bit) & GFS2_BIT_MASK;

	if (valid_change[new_state * 4 + cur_state]) {
		*byte ^= cur_state << bit;
		*byte |= new_state << bit;
	} else
		gfs2_consist_rgrpd(rgd);
}

/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @block: the block to read
 *
 * Returns: the current state (GFS2_BLKST_XXX) of the block
 */

unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
			   unsigned int buflen, uint32_t block)
{
	unsigned char *byte, *end, cur_state;
	unsigned int bit;

	byte = buffer + (block / GFS2_NBBY);
	bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;

	gfs2_assert(rgd->rd_sbd, byte < end);

	cur_state = (*byte >> bit) & GFS2_BIT_MASK;

	return cur_state;
}

/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *               a block in a given allocation state.
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buffer)
 * @old_state: GFS2_BLKST_XXX the state of the block we're looking for;
 *             bit 0 = alloc(1)/free(0), bit 1 = meta(1)/data(0)
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem.  @buffer will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures.
 *
 * Returns: the block number (bitmap buffer scope) that was found, or
 *          BFITNOENT if no block in @old_state was found
 */

uint32_t gfs2_bitfit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
		     unsigned int buflen, uint32_t goal,
		     unsigned char old_state)
{
	unsigned char *byte, *end, alloc;
	uint32_t blk = goal;
	unsigned int bit;

	byte = buffer + (goal / GFS2_NBBY);
	bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;
	alloc = (old_state & 1) ? 0 : 0x55;

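	/*
	 * 0x55 masks the "allocated" bit of all four bit-pairs in a byte.
	 * If we are looking for an allocated state (bit 0 of @old_state
	 * set), a byte whose masked value is 0 holds only free blocks and
	 * can be skipped whole; if we are looking for a free state, a byte
	 * whose masked value is 0x55 is fully allocated and can likewise
	 * be skipped.
	 */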
	while (byte < end) {
		if ((*byte & 0x55) == alloc) {
			blk += (8 - bit) >> 1;
			bit = 0;
			byte++;
			continue;
		}
		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
			return blk;
		bit += GFS2_BIT_SIZE;
		if (bit >= 8) {
			bit = 0;
			byte++;
		}
		blk++;
	}

	return BFITNOENT;
}

/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

uint32_t gfs2_bitcount(struct gfs2_rgrpd *rgd, unsigned char *buffer,
		       unsigned int buflen, unsigned char state)
{
	unsigned char *byte = buffer;
	unsigned char *end = buffer + buflen;
	unsigned char state1 = state << 2;
	unsigned char state2 = state << 4;
	unsigned char state3 = state << 6;
	uint32_t count = 0;

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}

/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	uint32_t length = rgd->rd_ri.ri_length;
	uint32_t count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(uint32_t));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_rg.rg_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_rg.rg_free);
		return;
	}

	tmp = rgd->rd_ri.ri_data -
	      rgd->rd_rg.rg_free -
	      rgd->rd_rg.rg_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[2]) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free metadata mismatch: %u != 0\n",
			       count[2]);
		return;
	}

	if (count[3] != rgd->rd_rg.rg_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[3], rgd->rd_rg.rg_dinodes);
		return;
	}
}

static inline int rgrp_contains_block(struct gfs2_rindex *ri, uint64_t block)
{
	uint64_t first = ri->ri_data0;
	uint64_t last = first + ri->ri_data;
	return !!(first <= block && block < last);
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, uint64_t blk)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);

	list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
		if (rgrp_contains_block(&rgd->rd_ri, blk)) {
			list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
			spin_unlock(&sdp->sd_rindex_spin);
			return rgd;
		}
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
	return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: A RG
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
		return NULL;
	return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
}

static void clear_rgrpdi(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = NULL;
	head = &sdp->sd_rindex_recent_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
		list_del(&rgd->rd_recent);
	}
	spin_unlock(&sdp->sd_rindex_spin);

	head = &sdp->sd_rindex_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
		gl = rgd->rd_gl;

		list_del(&rgd->rd_list);
		list_del(&rgd->rd_list_mru);

		if (gl) {
			gl->gl_object = NULL;
			gfs2_glock_put(gl);
		}

		kfree(rgd->rd_bits);
		kfree(rgd);
	}
}

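/**
 * gfs2_clear_rgrpd - Tear down all in-core resource group descriptors
 * @sdp: the filesystem
 *
 * Takes the rindex mutex and frees every rgrp descriptor via clear_rgrpdi().
 */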
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_rindex_mutex);
	clear_rgrpdi(sdp);
	mutex_unlock(&sdp->sd_rindex_mutex);
}

/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	uint32_t length = rgd->rd_ri.ri_length; /* # blocks in hdr & bitmap */
	uint32_t bytes_left, bytes;
	int x;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_KERNEL);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_ri.ri_bitbytes;

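	/*
	 * The rgrp header block holds a struct gfs2_rgrp followed by the
	 * first chunk of bitmap bytes; each additional bitmap block holds a
	 * struct gfs2_meta_header followed by more bitmap bytes.  bi_offset
	 * skips those headers, while bi_start/bi_len track each block's
	 * byte range within the rgrp's ri_bitbytes of bitmap data.
	 */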
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
			bi->bi_len = bytes;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
			bi->bi_len = bytes;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_ri.ri_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(&rgd->rd_ri);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct inode *inode = ip->i_vnode;
	struct gfs2_rgrpd *rgd;
	char buf[sizeof(struct gfs2_rindex)];
	struct file_ra_state ra_state;
	uint64_t junk = ip->i_di.di_size;
	int error;

	if (do_div(junk, sizeof(struct gfs2_rindex))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	clear_rgrpdi(sdp);

	file_ra_state_init(&ra_state, inode->i_mapping);
	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
		loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
		error = gfs2_internal_read(ip, &ra_state, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (!error)
			break;
		if (error != sizeof(struct gfs2_rindex)) {
			if (error > 0)
				error = -EIO;
			goto fail;
		}

		rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_KERNEL);
		error = -ENOMEM;
		if (!rgd)
			goto fail;

		mutex_init(&rgd->rd_mutex);
		lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
		rgd->rd_sbd = sdp;

		list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
		list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);

		gfs2_rindex_in(&rgd->rd_ri, buf);

		error = compute_bitstructs(rgd);
		if (error)
			goto fail;

		error = gfs2_glock_get(sdp, rgd->rd_ri.ri_addr,
				       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
		if (error)
			goto fail;

		rgd->rd_gl->gl_object = rgd;
		rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
	}

	sdp->sd_rindex_vn = ip->i_gl->gl_vn;

	return 0;

fail:
	clear_rgrpdi(sdp);

	return error;
}

/**
 * gfs2_rindex_hold - Grab a lock on the rindex
 * @sdp: The GFS2 superblock
 * @ri_gh: the glock holder
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
{
	struct gfs2_inode *ip = sdp->sd_rindex->u.generic_ip;
	struct gfs2_glock *gl = ip->i_gl;
	int error;

	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
	if (error)
		return error;

	/* Read new copy from disk if we don't have the latest */
	if (sdp->sd_rindex_vn != gl->gl_vn) {
		mutex_lock(&sdp->sd_rindex_mutex);
		if (sdp->sd_rindex_vn != gl->gl_vn) {
			error = gfs2_ri_update(ip);
			if (error)
				gfs2_glock_dq_uninit(ri_gh);
		}
		mutex_unlock(&sdp->sd_rindex_mutex);
	}

	return error;
}

/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_bh_put() to free the bitmaps.
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_ri.ri_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	mutex_lock(&rgd->rd_mutex);

	spin_lock(&sdp->sd_rindex_spin);
	if (rgd->rd_bh_count) {
		rgd->rd_bh_count++;
		spin_unlock(&sdp->sd_rindex_spin);
		mutex_unlock(&rgd->rd_mutex);
		return 0;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_ri.ri_addr + x, DIO_START,
				       &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_reread(sdp, bi->bi_bh, DIO_WAIT);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh,
					(y) ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (rgd->rd_rg_vn != gl->gl_vn) {
		gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_rg_vn = gl->gl_vn;
	}

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone = rgd->rd_rg.rg_free;
	rgd->rd_bh_count++;
	spin_unlock(&sdp->sd_rindex_spin);

	mutex_unlock(&rgd->rd_mutex);

	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}
	mutex_unlock(&rgd->rd_mutex);

	return error;
}

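/**
 * gfs2_rgrp_bh_hold - Take an extra reference on an already-read RG
 * @rgd: the struct gfs2_rgrpd whose bitmaps are already held
 *
 * The caller must already hold the bitmaps (rd_bh_count is asserted to be
 * non-zero); this just bumps the reference count under sd_rindex_spin.
 */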
void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	spin_lock(&sdp->sd_rindex_spin);
	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
	rgd->rd_bh_count++;
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: the struct gfs2_rgrpd whose bitmaps are being released
 *
 */

void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	int x, length = rgd->rd_ri.ri_length;

	spin_lock(&sdp->sd_rindex_spin);
	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
	if (--rgd->rd_bh_count) {
		spin_unlock(&sdp->sd_rindex_spin);
		return;
	}

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
	}

	spin_unlock(&sdp->sd_rindex_spin);
}

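/**
 * gfs2_rgrp_repolish_clones - Resync clone bitmaps with the real bitmaps
 * @rgd: the resource group descriptor
 *
 * Copies the current bitmap contents over any clone bitmaps and resets
 * rd_free_clone to the rgrp's free-block count, so that subsequent
 * allocation searches see the up-to-date state.
 */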
void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	unsigned int length = rgd->rd_ri.ri_length;
	unsigned int x;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (!bi->bi_clone)
			continue;
		memcpy(bi->bi_clone + bi->bi_offset,
		       bi->bi_bh->b_data + bi->bi_offset,
		       bi->bi_len);
	}

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone = rgd->rd_rg.rg_free;
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
 * @ip: the incore GFS2 inode structure
 *
 * Returns: the struct gfs2_alloc
 */

struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = &ip->i_alloc;

	/* FIXME: Should assert that the correct locks are held here... */
	memset(al, 0, sizeof(*al));
	return al;
}

/**
 * gfs2_alloc_put - throw away the struct gfs2_alloc for an inode
 * @ip: the inode
 *
 */

void gfs2_alloc_put(struct gfs2_inode *ip)
{
	return;
}

/**
 * try_rgrp_fit - See if a given reservation will fit in a given RG
 * @rgd: the RG data
 * @al: the struct gfs2_alloc structure describing the reservation
 *
 * If there's room for the requested blocks to be allocated from the RG,
 * sets the $al_rgd field in @al.
 *
 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
 */

static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	int ret = 0;

	spin_lock(&sdp->sd_rindex_spin);
	if (rgd->rd_free_clone >= al->al_requested) {
		al->al_rgd = rgd;
		ret = 1;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return ret;
}

/**
 * recent_rgrp_first - get first RG from "recent" list
 * @sdp: The GFS2 superblock
 * @rglast: address of the rgrp used last
 *
 * Returns: The first rgrp in the recent list
 */

static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
					    uint64_t rglast)
{
	struct gfs2_rgrpd *rgd = NULL;

	spin_lock(&sdp->sd_rindex_spin);

	if (list_empty(&sdp->sd_rindex_recent_list))
		goto out;

	if (!rglast)
		goto first;

	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
		if (rgd->rd_ri.ri_addr == rglast)
			goto out;
	}

first:
	rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd,
			 rd_recent);

out:
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}

/**
 * recent_rgrp_next - get next RG from "recent" list
 * @cur_rgd: current rgrp
 * @remove: if set, also remove @cur_rgd from the list
 *
 * Returns: The next rgrp in the recent list
 */

static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
					   int remove)
{
	struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
	struct list_head *head;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);

	head = &sdp->sd_rindex_recent_list;

	list_for_each_entry(rgd, head, rd_recent) {
		if (rgd == cur_rgd) {
			if (cur_rgd->rd_recent.next != head)
				rgd = list_entry(cur_rgd->rd_recent.next,
						 struct gfs2_rgrpd, rd_recent);
			else
				rgd = NULL;

			if (remove)
				list_del(&cur_rgd->rd_recent);

			goto out;
		}
	}

	rgd = NULL;
	if (!list_empty(head))
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);

out:
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}

/**
 * recent_rgrp_add - add an RG to tail of "recent" list
 * @new_rgd: The rgrp to add
 *
 */

static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
{
	struct gfs2_sbd *sdp = new_rgd->rd_sbd;
	struct gfs2_rgrpd *rgd;
	unsigned int count = 0;
	unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);

	spin_lock(&sdp->sd_rindex_spin);

	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
		if (rgd == new_rgd)
			goto out;

		if (++count >= max)
			goto out;
	}
	list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);

out:
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * forward_rgrp_get - get an rgrp to try next from full list
 * @sdp: The GFS2 superblock
 *
 * Returns: The rgrp to try next
 */

static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd;
	unsigned int journals = gfs2_jindex_size(sdp);
	unsigned int rg = 0, x;

	spin_lock(&sdp->sd_rindex_spin);

	rgd = sdp->sd_rindex_forward;
	if (!rgd) {
		if (sdp->sd_rgrps >= journals)
			rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;

		for (x = 0, rgd = gfs2_rgrpd_get_first(sdp);
		     x < rg;
		     x++, rgd = gfs2_rgrpd_get_next(rgd))
			/* Do Nothing */;

		sdp->sd_rindex_forward = rgd;
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}

/**
 * forward_rgrp_set - set the forward rgrp pointer
 * @sdp: the filesystem
 * @rgd: The new forward rgrp
 *
 */

static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
{
	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = rgd;
	spin_unlock(&sdp->sd_rindex_spin);
}

/**
 * get_local_rgrp - Choose and lock a rgrp for allocation
 * @ip: the inode to reserve space for
 *
 * Try to acquire an rgrp in a way that avoids contending with others.
 * The chosen rgrp and its glock holder are recorded in the inode's
 * gfs2_alloc structure.
 *
 * Returns: errno
 */

static int get_local_rgrp(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd, *begin = NULL;
	struct gfs2_alloc *al = &ip->i_alloc;
	int flags = LM_FLAG_TRY;
	int skipped = 0;
	int loops = 0;
	int error;

	/* Try recently successful rgrps */

	rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);

	while (rgd) {
		error = gfs2_glock_nq_init(rgd->rd_gl,
					   LM_ST_EXCLUSIVE, LM_FLAG_TRY,
					   &al->al_rgd_gh);
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			gfs2_glock_dq_uninit(&al->al_rgd_gh);
			rgd = recent_rgrp_next(rgd, 1);
			break;

		case GLR_TRYFAILED:
			rgd = recent_rgrp_next(rgd, 0);
			break;

		default:
			return error;
		}
	}

	/* Go through full list of rgrps */

	begin = rgd = forward_rgrp_get(sdp);

	for (;;) {
		error = gfs2_glock_nq_init(rgd->rd_gl,
					   LM_ST_EXCLUSIVE, flags,
					   &al->al_rgd_gh);
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			gfs2_glock_dq_uninit(&al->al_rgd_gh);
			break;

		case GLR_TRYFAILED:
			skipped++;
			break;

		default:
			return error;
		}

		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);

		if (rgd == begin) {
			if (++loops >= 2 || !skipped)
				return -ENOSPC;
			flags = 0;
		}
	}

out:
	ip->i_last_rg_alloc = rgd->rd_ri.ri_addr;

	if (begin) {
		recent_rgrp_add(rgd);
		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);
		forward_rgrp_set(sdp, rgd);
	}

	return 0;
}

/**
 * gfs2_inplace_reserve_i - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @file: the source file of the caller (used in debug messages)
 * @line: the line number of the caller (used in debug messages)
 *
 * Returns: errno
 */

int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	int error;

	if (gfs2_assert_warn(sdp, al->al_requested))
		return -EINVAL;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		return error;

	error = get_local_rgrp(ip);
	if (error) {
		gfs2_glock_dq_uninit(&al->al_ri_gh);
		return error;
	}

	al->al_file = file;
	al->al_line = line;

	return 0;
}

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;

	if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
		fs_warn(sdp, "al_alloced = %u, al_requested = %u "
			"al_file = %s, al_line = %u\n",
			al->al_alloced, al->al_requested, al->al_file,
			al->al_line);

	al->al_rgd = NULL;
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	gfs2_glock_dq_uninit(&al->al_ri_gh);
}

/**
 * gfs2_get_block_type - Determine the type of a block in a RG
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */

unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, uint64_t block)
{
	struct gfs2_bitmap *bi = NULL;
	uint32_t length, rgrp_block, buf_block;
	unsigned int buf;
	unsigned char type;

	length = rgd->rd_ri.ri_length;
	rgrp_block = block - rgd->rd_ri.ri_data0;

	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);
	buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;

	type = gfs2_testbit(rgd,
			    bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_block);

	return type;
}

/**
 * rgblk_search - find a block in @old_state, change allocation
 *                state to @new_state
 * @rgd: the resource group descriptor
 * @goal: the goal block within the RG (start here to search for avail block)
 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to @new_state to change block's allocation state.
 *
 * This function never fails, because we wouldn't call it unless we
 * know (from reservation results, etc.) that a block is available.
 *
 * Scope of @goal and returned block is just within rgrp, not the whole
 * filesystem.
 *
 * Returns: the block number allocated
 */

static uint32_t rgblk_search(struct gfs2_rgrpd *rgd, uint32_t goal,
			     unsigned char old_state, unsigned char new_state)
{
	struct gfs2_bitmap *bi = NULL;
	uint32_t length = rgd->rd_ri.ri_length;
	uint32_t blk = 0;
	unsigned int buf, x;

	/* Find bitmap block that contains bits for goal block */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);

	/* Convert scope of "goal" from rgrp-wide to within found bit block */
	goal -= bi->bi_start * GFS2_NBBY;

	/* Search (up to entire) bitmap in this rgrp for allocatable block.
	   "x <= length", instead of "x < length", because we typically start
	   the search in the middle of a bit block, but if we can't find an
	   allocatable block anywhere else, we want to be able to wrap around
	   and search in the first part of our first-searched bit block. */
	for (x = 0; x <= length; x++) {
		if (bi->bi_clone)
			blk = gfs2_bitfit(rgd, bi->bi_clone + bi->bi_offset,
					  bi->bi_len, goal, old_state);
		else
			blk = gfs2_bitfit(rgd,
					  bi->bi_bh->b_data + bi->bi_offset,
					  bi->bi_len, goal, old_state);
		if (blk != BFITNOENT)
			break;

		/* Try next bitmap block (wrap back to rgrp header if at end) */
		buf = (buf + 1) % length;
		bi = rgd->rd_bits + buf;
		goal = 0;
	}

	if (gfs2_assert_withdraw(rgd->rd_sbd, x <= length))
		blk = 0;

	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
	gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
		    bi->bi_len, blk, new_state);
	if (bi->bi_clone)
		gfs2_setbit(rgd, bi->bi_clone + bi->bi_offset,
			    bi->bi_len, blk, new_state);

	return bi->bi_start * GFS2_NBBY + blk;
}

/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, uint64_t bstart,
				     uint32_t blen, unsigned char new_state)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_bitmap *bi = NULL;
	uint32_t length, rgrp_blk, buf_blk;
	unsigned int buf;

	rgd = gfs2_blk2rgrpd(sdp, bstart);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", bstart);
		return NULL;
	}

	length = rgd->rd_ri.ri_length;

	rgrp_blk = bstart - rgd->rd_ri.ri_data0;

	while (blen--) {
		for (buf = 0; buf < length; buf++) {
			bi = rgd->rd_bits + buf;
			if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
				break;
		}

		gfs2_assert(rgd->rd_sbd, buf < length);

		buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
		rgrp_blk++;

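		/*
		 * On the first free in this bitmap block, take a private
		 * copy ("clone") of the current bitmap contents.  Allocation
		 * searches in rgblk_search() use the clone when it exists,
		 * which presumably keeps just-freed blocks from being handed
		 * out again before gfs2_rgrp_repolish_clones() resyncs the
		 * clone with the real bitmap.
		 */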
		if (!bi->bi_clone) {
			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
					       GFP_KERNEL | __GFP_NOFAIL);
			memcpy(bi->bi_clone + bi->bi_offset,
			       bi->bi_bh->b_data + bi->bi_offset,
			       bi->bi_len);
		}

		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		gfs2_setbit(rgd,
			    bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_blk, new_state);
	}

	return rgd;
}

/**
 * gfs2_alloc_data - Allocate a data block
 * @ip: the inode to allocate the data block for
 *
 * Returns: the allocated block
 */

uint64_t gfs2_alloc_data(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	uint32_t goal, blk;
	uint64_t block;

	if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_data))
		goal = ip->i_di.di_goal_data - rgd->rd_ri.ri_data0;
	else
		goal = rgd->rd_last_alloc_data;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_data = blk;

	block = rgd->rd_ri.ri_data0 + blk;
	ip->i_di.di_goal_data = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}

/**
 * gfs2_alloc_meta - Allocate a metadata block
 * @ip: the inode to allocate the metadata block for
 *
 * Returns: the allocated block
 */

uint64_t gfs2_alloc_meta(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	uint32_t goal, blk;
	uint64_t block;

	if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_meta))
		goal = ip->i_di.di_goal_meta - rgd->rd_ri.ri_data0;
	else
		goal = rgd->rd_last_alloc_meta;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_meta = blk;

	block = rgd->rd_ri.ri_data0 + blk;
	ip->i_di.di_goal_meta = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);
	gfs2_trans_add_unrevoke(sdp, block);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}

/**
 * gfs2_alloc_di - Allocate a dinode
 * @dip: the directory that the inode is going in
 *
 * Returns: the block allocated
 */

uint64_t gfs2_alloc_di(struct gfs2_inode *dip)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_alloc *al = &dip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	uint32_t blk;
	uint64_t block;

	blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE);

	rgd->rd_last_alloc_meta = blk;

	block = rgd->rd_ri.ri_data0 + blk;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;
	rgd->rd_rg.rg_dinodes++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, +1);
	gfs2_trans_add_unrevoke(sdp, block);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}

/**
 * gfs2_free_data - free a contiguous run of data block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_data(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(int64_t)blen,
			  ip->i_di.di_uid, ip->i_di.di_gid);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_meta(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(int64_t)blen,
			  ip->i_di.di_uid, ip->i_di.di_gid);
	gfs2_meta_wipe(ip, bstart, blen);
}

void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, uint64_t blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_rg.rg_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_rg.rg_dinodes--;
	rgd->rd_rg.rg_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, +1, -1);
	gfs2_trans_add_rg(rgd);
}

/**
 * gfs2_free_di - free a dinode block
 * @rgd: the resource group that contains the dinode
 * @ip: the inode
 *
 */

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
	gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid);
	gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
}

/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @sdp: the filesystem
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
		    uint64_t block)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	rgd = gfs2_blk2rgrpd(sdp, block);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", block);
		return;
	}

	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_KERNEL | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *                    and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 * @flags: the modifier flags for the holder structures
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
		      int flags)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_KERNEL | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, flags,
				 &rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */

void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
	}
}