rgrp.c

  1. /*
  2. * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  4. *
  5. * This copyrighted material is made available to anyone wishing to use,
  6. * modify, copy, or redistribute it subject to the terms and conditions
  7. * of the GNU General Public License version 2.
  8. */
  9. #include <linux/slab.h>
  10. #include <linux/spinlock.h>
  11. #include <linux/completion.h>
  12. #include <linux/buffer_head.h>
  13. #include <linux/fs.h>
  14. #include <linux/gfs2_ondisk.h>
  15. #include <linux/prefetch.h>
  16. #include <linux/blkdev.h>
  17. #include <linux/rbtree.h>
  18. #include "gfs2.h"
  19. #include "incore.h"
  20. #include "glock.h"
  21. #include "glops.h"
  22. #include "lops.h"
  23. #include "meta_io.h"
  24. #include "quota.h"
  25. #include "rgrp.h"
  26. #include "super.h"
  27. #include "trans.h"
  28. #include "util.h"
  29. #include "log.h"
  30. #include "inode.h"
  31. #include "trace_gfs2.h"
  32. #define BFITNOENT ((u32)~0)
  33. #define NO_BLOCK ((u64)~0)
  34. #define RSRV_CONTENTION_FACTOR 4
  35. #define RGRP_RSRV_MAX_CONTENDERS 2
  36. #if BITS_PER_LONG == 32
  37. #define LBITMASK (0x55555555UL)
  38. #define LBITSKIP55 (0x55555555UL)
  39. #define LBITSKIP00 (0x00000000UL)
  40. #else
  41. #define LBITMASK (0x5555555555555555UL)
  42. #define LBITSKIP55 (0x5555555555555555UL)
  43. #define LBITSKIP00 (0x0000000000000000UL)
  44. #endif
  45. /*
  46. * These routines are used by the resource group routines (rgrp.c)
  47. * to keep track of block allocation. Each block is represented by two
  48. * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
  49. *
  50. * 0 = Free
  51. * 1 = Used (not metadata)
  52. * 2 = Unlinked (still in use) inode
  53. * 3 = Used (metadata)
  54. */
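/*
 * Illustration of the encoding above: the byte 0xE4 (binary 11 10 01 00)
 * describes four consecutive blocks, with the lowest-numbered block in the
 * least significant bit pair: block+0 is free (00), block+1 is used data
 * (01), block+2 is an unlinked inode (10) and block+3 is used metadata (11).
 *
 * The valid_change[] table below is indexed as new_state * 4 + cur_state
 * (see gfs2_setbit), so for example entry [1 * 4 + 0] == 1 says that a free
 * block may become used data, while entry [0 * 4 + 0] == 0 says that
 * "freeing" an already free block is an inconsistency.
 */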
  55. static const char valid_change[16] = {
  56. /* current */
  57. /* n */ 0, 1, 1, 1,
  58. /* e */ 1, 0, 0, 0,
  59. /* w */ 0, 0, 0, 1,
  60. 1, 0, 0, 0
  61. };
  62. /**
  63. * gfs2_setbit - Set a bit in the bitmaps
  64. * @rgd: the resource group descriptor
  65. * @buf2: the clone buffer that holds the bitmaps
  66. * @bi: the bitmap structure
  67. * @block: the block to set
  68. * @new_state: the new state of the block
  69. *
  70. */
  71. static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2,
  72. struct gfs2_bitmap *bi, u32 block,
  73. unsigned char new_state)
  74. {
  75. unsigned char *byte1, *byte2, *end, cur_state;
  76. unsigned int buflen = bi->bi_len;
  77. const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
  78. byte1 = bi->bi_bh->b_data + bi->bi_offset + (block / GFS2_NBBY);
  79. end = bi->bi_bh->b_data + bi->bi_offset + buflen;
  80. BUG_ON(byte1 >= end);
  81. cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
  82. if (unlikely(!valid_change[new_state * 4 + cur_state])) {
  83. printk(KERN_WARNING "GFS2: buf_blk = 0x%llx old_state=%d, "
  84. "new_state=%d\n",
  85. (unsigned long long)block, cur_state, new_state);
  86. printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%lx\n",
  87. (unsigned long long)rgd->rd_addr,
  88. (unsigned long)bi->bi_start);
  89. printk(KERN_WARNING "GFS2: bi_offset=0x%lx bi_len=0x%lx\n",
  90. (unsigned long)bi->bi_offset,
  91. (unsigned long)bi->bi_len);
  92. dump_stack();
  93. gfs2_consist_rgrpd(rgd);
  94. return;
  95. }
  96. *byte1 ^= (cur_state ^ new_state) << bit;
  97. if (buf2) {
  98. byte2 = buf2 + bi->bi_offset + (block / GFS2_NBBY);
  99. cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
  100. *byte2 ^= (cur_state ^ new_state) << bit;
  101. }
  102. }
  103. /**
  104. * gfs2_testbit - test a bit in the bitmaps
  105. * @rgd: the resource group descriptor
  106. * @buffer: the buffer that holds the bitmaps
  107. * @buflen: the length (in bytes) of the buffer
  108. * @block: the block to read
  109. *
  110. */
  111. static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
  112. const unsigned char *buffer,
  113. unsigned int buflen, u32 block)
  114. {
  115. const unsigned char *byte, *end;
  116. unsigned char cur_state;
  117. unsigned int bit;
  118. byte = buffer + (block / GFS2_NBBY);
  119. bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
  120. end = buffer + buflen;
  121. gfs2_assert(rgd->rd_sbd, byte < end);
  122. cur_state = (*byte >> bit) & GFS2_BIT_MASK;
  123. return cur_state;
  124. }
  125. /**
  126. * gfs2_bit_search
  127. * @ptr: Pointer to bitmap data
  128. * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
  129. * @state: The state we are searching for
  130. *
  131. * We xor the bitmap data with a pattern which is the bitwise opposite
  132. * of what we are looking for; this gives rise to a pattern of ones
  133. * wherever there is a match. Since we have two bits per entry, we
  134. * take this pattern, shift it down by one place and then AND it with
  135. * the original. All the even bit positions (0, 2, 4, etc.) then represent
  136. * successful matches, so we mask with 0x55555..... to remove the unwanted
  137. * odd bit positions.
  138. *
  139. * This allows searching of a whole u64 at once (32 blocks) with a
  140. * single test (on 64 bit arches).
  141. */
  142. static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
  143. {
  144. u64 tmp;
  145. static const u64 search[] = {
  146. [0] = 0xffffffffffffffffULL,
  147. [1] = 0xaaaaaaaaaaaaaaaaULL,
  148. [2] = 0x5555555555555555ULL,
  149. [3] = 0x0000000000000000ULL,
  150. };
  151. tmp = le64_to_cpu(*ptr) ^ search[state];
  152. tmp &= (tmp >> 1);
  153. tmp &= mask;
  154. return tmp;
  155. }
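/*
 * Worked example of the search trick above, using a single byte for
 * brevity: looking for state 1 (used data) in the byte 0xE4 (blocks in
 * states 11, 10, 01, 00).  XOR with the state-1 pattern 0xAA gives 0x4E;
 * 0x4E & (0x4E >> 1) gives 0x06; masking with 0x55 leaves 0x04, i.e. only
 * bit 2 set.  __ffs() of that is 2, and 2 / 2 = 1, so block 1 is correctly
 * reported as the first block in state 1.
 */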
  156. /**
  157. * rs_cmp - multi-block reservation range compare
  158. * @blk: absolute file system block number of the new reservation
  159. * @len: number of blocks in the new reservation
  160. * @rs: existing reservation to compare against
  161. *
  162. * returns: 1 if the block range is beyond the reach of the reservation
  163. * -1 if the block range is before the start of the reservation
  164. * 0 if the block range overlaps with the reservation
  165. */
  166. static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
  167. {
  168. u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
  169. if (blk >= startblk + rs->rs_free)
  170. return 1;
  171. if (blk + len - 1 < startblk)
  172. return -1;
  173. return 0;
  174. }
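/*
 * For example, given an existing reservation covering blocks 1000..1019
 * (rs_free == 20): rs_cmp(1020, 5, rs) returns 1 (entirely after),
 * rs_cmp(990, 10, rs) returns -1 (entirely before, since 999 < 1000), and
 * rs_cmp(995, 10, rs) returns 0 because the two ranges overlap.
 */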
  175. /**
  176. * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
  177. * a block in a given allocation state.
  178. * @buf: the buffer that holds the bitmaps
  179. * @len: the length (in bytes) of the buffer
  180. * @goal: start search at this block's bit-pair (within @buffer)
  181. * @state: GFS2_BLKST_XXX the state of the block we're looking for.
  182. *
  183. * Scope of @goal and returned block number is only within this bitmap buffer,
  184. * not entire rgrp or filesystem. @buffer will be offset from the actual
  185. * beginning of a bitmap block buffer, skipping any header structures, but
  186. * headers are always a multiple of 64 bits long so that the buffer is
  187. * always aligned to a 64 bit boundary.
  188. *
  189. * The size of the buffer is in bytes, but it is assumed that it is
  190. * always ok to read a complete multiple of 64 bits at the end
  191. * of the block in case the end is not aligned to a natural boundary.
  192. *
  193. * Return: the block number (bitmap buffer scope) that was found
  194. */
  195. static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
  196. u32 goal, u8 state)
  197. {
  198. u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
  199. const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
  200. const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
  201. u64 tmp;
  202. u64 mask = 0x5555555555555555ULL;
  203. u32 bit;
  204. BUG_ON(state > 3);
  205. /* Mask off bits we don't care about at the start of the search */
  206. mask <<= spoint;
  207. tmp = gfs2_bit_search(ptr, mask, state);
  208. ptr++;
  209. while(tmp == 0 && ptr < end) {
  210. tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
  211. ptr++;
  212. }
  213. /* Mask off any bits which are more than len bytes from the start */
  214. if (ptr == end && (len & (sizeof(u64) - 1)))
  215. tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
  216. /* Didn't find anything, so return */
  217. if (tmp == 0)
  218. return BFITNOENT;
  219. ptr--;
  220. bit = __ffs64(tmp);
  221. bit /= 2; /* two bits per entry in the bitmap */
  222. return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
  223. }
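/*
 * Example of how the arithmetic above maps back to block numbers: with
 * goal = 70, the scan starts at the third u64 word (goal >> 5 == 2, i.e.
 * blocks 64..95) and spoint = (70 << 1) & 63 == 12, so the shifted mask
 * ignores blocks 64..69 of that word.  If the first matching bit is then
 * found at bit position 20, bit /= 2 gives pair 10, and the return value
 * is (16 bytes into the buffer) * GFS2_NBBY + 10 == 74, a bitmap-relative
 * block number.
 */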
  224. /**
  225. * gfs2_bitcount - count the number of bits in a certain state
  226. * @rgd: the resource group descriptor
  227. * @buffer: the buffer that holds the bitmaps
  228. * @buflen: the length (in bytes) of the buffer
  229. * @state: the state of the block we're looking for
  230. *
  231. * Returns: The number of bits
  232. */
  233. static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
  234. unsigned int buflen, u8 state)
  235. {
  236. const u8 *byte = buffer;
  237. const u8 *end = buffer + buflen;
  238. const u8 state1 = state << 2;
  239. const u8 state2 = state << 4;
  240. const u8 state3 = state << 6;
  241. u32 count = 0;
  242. for (; byte < end; byte++) {
  243. if (((*byte) & 0x03) == state)
  244. count++;
  245. if (((*byte) & 0x0C) == state1)
  246. count++;
  247. if (((*byte) & 0x30) == state2)
  248. count++;
  249. if (((*byte) & 0xC0) == state3)
  250. count++;
  251. }
  252. return count;
  253. }
  254. /**
  255. * gfs2_rgrp_verify - Verify that a resource group is consistent
  256. * @rgd: the rgrp
  257. *
  258. */
  259. void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
  260. {
  261. struct gfs2_sbd *sdp = rgd->rd_sbd;
  262. struct gfs2_bitmap *bi = NULL;
  263. u32 length = rgd->rd_length;
  264. u32 count[4], tmp;
  265. int buf, x;
  266. memset(count, 0, 4 * sizeof(u32));
  267. /* Count # blocks in each of 4 possible allocation states */
  268. for (buf = 0; buf < length; buf++) {
  269. bi = rgd->rd_bits + buf;
  270. for (x = 0; x < 4; x++)
  271. count[x] += gfs2_bitcount(rgd,
  272. bi->bi_bh->b_data +
  273. bi->bi_offset,
  274. bi->bi_len, x);
  275. }
  276. if (count[0] != rgd->rd_free) {
  277. if (gfs2_consist_rgrpd(rgd))
  278. fs_err(sdp, "free data mismatch: %u != %u\n",
  279. count[0], rgd->rd_free);
  280. return;
  281. }
  282. tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
  283. if (count[1] != tmp) {
  284. if (gfs2_consist_rgrpd(rgd))
  285. fs_err(sdp, "used data mismatch: %u != %u\n",
  286. count[1], tmp);
  287. return;
  288. }
  289. if (count[2] + count[3] != rgd->rd_dinodes) {
  290. if (gfs2_consist_rgrpd(rgd))
  291. fs_err(sdp, "used metadata mismatch: %u != %u\n",
  292. count[2] + count[3], rgd->rd_dinodes);
  293. return;
  294. }
  295. }
  296. static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
  297. {
  298. u64 first = rgd->rd_data0;
  299. u64 last = first + rgd->rd_data;
  300. return first <= block && block < last;
  301. }
  302. /**
  303. * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
  304. * @sdp: The GFS2 superblock
  305. * @blk: The data block number
  306. * @exact: True if this needs to be an exact match
  307. *
  308. * Returns: The resource group, or NULL if not found
  309. */
  310. struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
  311. {
  312. struct rb_node *n, *next;
  313. struct gfs2_rgrpd *cur;
  314. spin_lock(&sdp->sd_rindex_spin);
  315. n = sdp->sd_rindex_tree.rb_node;
  316. while (n) {
  317. cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
  318. next = NULL;
  319. if (blk < cur->rd_addr)
  320. next = n->rb_left;
  321. else if (blk >= cur->rd_data0 + cur->rd_data)
  322. next = n->rb_right;
  323. if (next == NULL) {
  324. spin_unlock(&sdp->sd_rindex_spin);
  325. if (exact) {
  326. if (blk < cur->rd_addr)
  327. return NULL;
  328. if (blk >= cur->rd_data0 + cur->rd_data)
  329. return NULL;
  330. }
  331. return cur;
  332. }
  333. n = next;
  334. }
  335. spin_unlock(&sdp->sd_rindex_spin);
  336. return NULL;
  337. }
  338. /**
  339. * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
  340. * @sdp: The GFS2 superblock
  341. *
  342. * Returns: The first rgrp in the filesystem
  343. */
  344. struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
  345. {
  346. const struct rb_node *n;
  347. struct gfs2_rgrpd *rgd;
  348. spin_lock(&sdp->sd_rindex_spin);
  349. n = rb_first(&sdp->sd_rindex_tree);
  350. rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
  351. spin_unlock(&sdp->sd_rindex_spin);
  352. return rgd;
  353. }
  354. /**
  355. * gfs2_rgrpd_get_next - get the next RG
  356. * @rgd: the resource group descriptor
  357. *
  358. * Returns: The next rgrp
  359. */
  360. struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
  361. {
  362. struct gfs2_sbd *sdp = rgd->rd_sbd;
  363. const struct rb_node *n;
  364. spin_lock(&sdp->sd_rindex_spin);
  365. n = rb_next(&rgd->rd_node);
  366. if (n == NULL)
  367. n = rb_first(&sdp->sd_rindex_tree);
  368. if (unlikely(&rgd->rd_node == n)) {
  369. spin_unlock(&sdp->sd_rindex_spin);
  370. return NULL;
  371. }
  372. rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
  373. spin_unlock(&sdp->sd_rindex_spin);
  374. return rgd;
  375. }
  376. void gfs2_free_clones(struct gfs2_rgrpd *rgd)
  377. {
  378. int x;
  379. for (x = 0; x < rgd->rd_length; x++) {
  380. struct gfs2_bitmap *bi = rgd->rd_bits + x;
  381. kfree(bi->bi_clone);
  382. bi->bi_clone = NULL;
  383. }
  384. }
  385. /**
  386. * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
  387. * @ip: the inode for this reservation
  388. */
  389. int gfs2_rs_alloc(struct gfs2_inode *ip)
  390. {
  391. int error = 0;
  392. struct gfs2_blkreserv *res;
  393. if (ip->i_res)
  394. return 0;
  395. res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
  396. if (!res)
  397. return -ENOMEM;
  398. RB_CLEAR_NODE(&res->rs_node);
  399. down_write(&ip->i_rw_mutex);
  400. if (ip->i_res)
  401. kmem_cache_free(gfs2_rsrv_cachep, res);
  402. else
  403. ip->i_res = res;
  404. up_write(&ip->i_rw_mutex);
  405. return error;
  406. }
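/*
 * Note on the pattern above: the reservation structure is allocated
 * optimistically before i_rw_mutex is taken, and the ip->i_res check is
 * then repeated under the lock.  If another task attached a reservation
 * first, the freshly allocated one is simply freed again; this keeps the
 * allocation out of the locked region at the cost of an occasional wasted
 * kmem_cache allocation.
 */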
  407. static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs)
  408. {
  409. gfs2_print_dbg(seq, " r: %llu s:%llu b:%u f:%u\n",
  410. rs->rs_rbm.rgd->rd_addr, gfs2_rbm_to_block(&rs->rs_rbm),
  411. rs->rs_rbm.offset, rs->rs_free);
  412. }
  413. /**
  414. * __rs_deltree - remove a multi-block reservation from the rgd tree
  415. * @rs: The reservation to remove
  416. *
  417. */
  418. static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
  419. {
  420. struct gfs2_rgrpd *rgd;
  421. if (!gfs2_rs_active(rs))
  422. return;
  423. rgd = rs->rs_rbm.rgd;
  424. trace_gfs2_rs(ip, rs, TRACE_RS_TREEDEL);
  425. rb_erase(&rs->rs_node, &rgd->rd_rstree);
  426. RB_CLEAR_NODE(&rs->rs_node);
  427. BUG_ON(!rgd->rd_rs_cnt);
  428. rgd->rd_rs_cnt--;
  429. if (rs->rs_free) {
  430. /* return reserved blocks to the rgrp and the ip */
  431. BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
  432. rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
  433. rs->rs_free = 0;
  434. clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
  435. smp_mb__after_clear_bit();
  436. }
  437. }
  438. /**
  439. * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
  440. * @rs: The reservation to remove
  441. *
  442. */
  443. void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
  444. {
  445. struct gfs2_rgrpd *rgd;
  446. rgd = rs->rs_rbm.rgd;
  447. if (rgd) {
  448. spin_lock(&rgd->rd_rsspin);
  449. __rs_deltree(ip, rs);
  450. spin_unlock(&rgd->rd_rsspin);
  451. }
  452. }
  453. /**
  454. * gfs2_rs_delete - delete a multi-block reservation
  455. * @ip: The inode for this reservation
  456. *
  457. */
  458. void gfs2_rs_delete(struct gfs2_inode *ip)
  459. {
  460. down_write(&ip->i_rw_mutex);
  461. if (ip->i_res) {
  462. gfs2_rs_deltree(ip, ip->i_res);
  463. trace_gfs2_rs(ip, ip->i_res, TRACE_RS_DELETE);
  464. BUG_ON(ip->i_res->rs_free);
  465. kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
  466. ip->i_res = NULL;
  467. }
  468. up_write(&ip->i_rw_mutex);
  469. }
  470. /**
  471. * return_all_reservations - return all reserved blocks back to the rgrp.
  472. * @rgd: the rgrp that needs its space back
  473. *
  474. * We previously reserved a bunch of blocks for allocation. Now we need to
  475. * give them back. This leaves the reservation structures intact, but removes
  476. * all of their corresponding "no-fly zones".
  477. */
  478. static void return_all_reservations(struct gfs2_rgrpd *rgd)
  479. {
  480. struct rb_node *n;
  481. struct gfs2_blkreserv *rs;
  482. spin_lock(&rgd->rd_rsspin);
  483. while ((n = rb_first(&rgd->rd_rstree))) {
  484. rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
  485. __rs_deltree(NULL, rs);
  486. }
  487. spin_unlock(&rgd->rd_rsspin);
  488. }
  489. void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
  490. {
  491. struct rb_node *n;
  492. struct gfs2_rgrpd *rgd;
  493. struct gfs2_glock *gl;
  494. while ((n = rb_first(&sdp->sd_rindex_tree))) {
  495. rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
  496. gl = rgd->rd_gl;
  497. rb_erase(n, &sdp->sd_rindex_tree);
  498. if (gl) {
  499. spin_lock(&gl->gl_spin);
  500. gl->gl_object = NULL;
  501. spin_unlock(&gl->gl_spin);
  502. gfs2_glock_add_to_lru(gl);
  503. gfs2_glock_put(gl);
  504. }
  505. gfs2_free_clones(rgd);
  506. kfree(rgd->rd_bits);
  507. return_all_reservations(rgd);
  508. kmem_cache_free(gfs2_rgrpd_cachep, rgd);
  509. }
  510. }
  511. static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
  512. {
  513. printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
  514. printk(KERN_INFO " ri_length = %u\n", rgd->rd_length);
  515. printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
  516. printk(KERN_INFO " ri_data = %u\n", rgd->rd_data);
  517. printk(KERN_INFO " ri_bitbytes = %u\n", rgd->rd_bitbytes);
  518. }
  519. /**
  520. * compute_bitstructs - Compute the bitmap sizes
  521. * @rgd: The resource group descriptor
  522. *
  523. * Calculates bitmap descriptors, one for each block that contains bitmap data
  524. *
  525. * Returns: errno
  526. */
  527. static int compute_bitstructs(struct gfs2_rgrpd *rgd)
  528. {
  529. struct gfs2_sbd *sdp = rgd->rd_sbd;
  530. struct gfs2_bitmap *bi;
  531. u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
  532. u32 bytes_left, bytes;
  533. int x;
  534. if (!length)
  535. return -EINVAL;
  536. rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
  537. if (!rgd->rd_bits)
  538. return -ENOMEM;
  539. bytes_left = rgd->rd_bitbytes;
  540. for (x = 0; x < length; x++) {
  541. bi = rgd->rd_bits + x;
  542. bi->bi_flags = 0;
  543. /* small rgrp; bitmap stored completely in header block */
  544. if (length == 1) {
  545. bytes = bytes_left;
  546. bi->bi_offset = sizeof(struct gfs2_rgrp);
  547. bi->bi_start = 0;
  548. bi->bi_len = bytes;
  549. /* header block */
  550. } else if (x == 0) {
  551. bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
  552. bi->bi_offset = sizeof(struct gfs2_rgrp);
  553. bi->bi_start = 0;
  554. bi->bi_len = bytes;
  555. /* last block */
  556. } else if (x + 1 == length) {
  557. bytes = bytes_left;
  558. bi->bi_offset = sizeof(struct gfs2_meta_header);
  559. bi->bi_start = rgd->rd_bitbytes - bytes_left;
  560. bi->bi_len = bytes;
  561. /* other blocks */
  562. } else {
  563. bytes = sdp->sd_sb.sb_bsize -
  564. sizeof(struct gfs2_meta_header);
  565. bi->bi_offset = sizeof(struct gfs2_meta_header);
  566. bi->bi_start = rgd->rd_bitbytes - bytes_left;
  567. bi->bi_len = bytes;
  568. }
  569. bytes_left -= bytes;
  570. }
  571. if (bytes_left) {
  572. gfs2_consist_rgrpd(rgd);
  573. return -EIO;
  574. }
  575. bi = rgd->rd_bits + (length - 1);
  576. if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
  577. if (gfs2_consist_rgrpd(rgd)) {
  578. gfs2_rindex_print(rgd);
  579. fs_err(sdp, "start=%u len=%u offset=%u\n",
  580. bi->bi_start, bi->bi_len, bi->bi_offset);
  581. }
  582. return -EIO;
  583. }
  584. return 0;
  585. }
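/*
 * Rough example of the resulting layout, assuming 4KiB blocks where
 * struct gfs2_rgrp is 128 bytes and struct gfs2_meta_header is 24 bytes:
 * the rgrp header block then carries 4096 - 128 = 3968 bitmap bytes
 * (15872 blocks' worth of state) and every following bitmap block carries
 * 4096 - 24 = 4072 bytes (16288 blocks).  bi_start records how many bitmap
 * bytes precede each bitmap block, so the final check above simply verifies
 * that the last bitmap ends exactly at rd_data blocks.
 */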
  586. /**
  587. * gfs2_ri_total - Total up the file system space, according to the rindex.
  588. * @sdp: the filesystem
  589. *
  590. */
  591. u64 gfs2_ri_total(struct gfs2_sbd *sdp)
  592. {
  593. u64 total_data = 0;
  594. struct inode *inode = sdp->sd_rindex;
  595. struct gfs2_inode *ip = GFS2_I(inode);
  596. char buf[sizeof(struct gfs2_rindex)];
  597. int error, rgrps;
  598. for (rgrps = 0;; rgrps++) {
  599. loff_t pos = rgrps * sizeof(struct gfs2_rindex);
  600. if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
  601. break;
  602. error = gfs2_internal_read(ip, buf, &pos,
  603. sizeof(struct gfs2_rindex));
  604. if (error != sizeof(struct gfs2_rindex))
  605. break;
  606. total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
  607. }
  608. return total_data;
  609. }
  610. static int rgd_insert(struct gfs2_rgrpd *rgd)
  611. {
  612. struct gfs2_sbd *sdp = rgd->rd_sbd;
  613. struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
  614. /* Figure out where to put new node */
  615. while (*newn) {
  616. struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
  617. rd_node);
  618. parent = *newn;
  619. if (rgd->rd_addr < cur->rd_addr)
  620. newn = &((*newn)->rb_left);
  621. else if (rgd->rd_addr > cur->rd_addr)
  622. newn = &((*newn)->rb_right);
  623. else
  624. return -EEXIST;
  625. }
  626. rb_link_node(&rgd->rd_node, parent, newn);
  627. rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
  628. sdp->sd_rgrps++;
  629. return 0;
  630. }
  631. /**
  632. * read_rindex_entry - Pull in a new resource index entry from the disk
  633. * @ip: Pointer to the rindex inode
  634. *
  635. * Returns: 0 on success, > 0 on EOF, error code otherwise
  636. */
  637. static int read_rindex_entry(struct gfs2_inode *ip)
  638. {
  639. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  640. loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
  641. struct gfs2_rindex buf;
  642. int error;
  643. struct gfs2_rgrpd *rgd;
  644. if (pos >= i_size_read(&ip->i_inode))
  645. return 1;
  646. error = gfs2_internal_read(ip, (char *)&buf, &pos,
  647. sizeof(struct gfs2_rindex));
  648. if (error != sizeof(struct gfs2_rindex))
  649. return (error == 0) ? 1 : error;
  650. rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
  651. error = -ENOMEM;
  652. if (!rgd)
  653. return error;
  654. rgd->rd_sbd = sdp;
  655. rgd->rd_addr = be64_to_cpu(buf.ri_addr);
  656. rgd->rd_length = be32_to_cpu(buf.ri_length);
  657. rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
  658. rgd->rd_data = be32_to_cpu(buf.ri_data);
  659. rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
  660. spin_lock_init(&rgd->rd_rsspin);
  661. error = compute_bitstructs(rgd);
  662. if (error)
  663. goto fail;
  664. error = gfs2_glock_get(sdp, rgd->rd_addr,
  665. &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
  666. if (error)
  667. goto fail;
  668. rgd->rd_gl->gl_object = rgd;
  669. rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
  670. rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
  671. if (rgd->rd_data > sdp->sd_max_rg_data)
  672. sdp->sd_max_rg_data = rgd->rd_data;
  673. spin_lock(&sdp->sd_rindex_spin);
  674. error = rgd_insert(rgd);
  675. spin_unlock(&sdp->sd_rindex_spin);
  676. if (!error)
  677. return 0;
  678. error = 0; /* someone else read in the rgrp; free it and ignore it */
  679. gfs2_glock_put(rgd->rd_gl);
  680. fail:
  681. kfree(rgd->rd_bits);
  682. kmem_cache_free(gfs2_rgrpd_cachep, rgd);
  683. return error;
  684. }
  685. /**
  686. * gfs2_ri_update - Pull in a new resource index from the disk
  687. * @ip: pointer to the rindex inode
  688. *
  689. * Returns: 0 on successful update, error code otherwise
  690. */
  691. static int gfs2_ri_update(struct gfs2_inode *ip)
  692. {
  693. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  694. int error;
  695. do {
  696. error = read_rindex_entry(ip);
  697. } while (error == 0);
  698. if (error < 0)
  699. return error;
  700. sdp->sd_rindex_uptodate = 1;
  701. return 0;
  702. }
  703. /**
  704. * gfs2_rindex_update - Update the rindex if required
  705. * @sdp: The GFS2 superblock
  706. *
  707. * We grab a lock on the rindex inode to make sure that it doesn't
  708. * change whilst we are performing an operation. We keep this lock
  709. * for quite long periods of time compared to other locks. This
  710. * doesn't matter, since it is shared and it is very, very rarely
  711. * accessed in the exclusive mode (i.e. only when expanding the filesystem).
  712. *
  713. * This makes sure that we're using the latest copy of the resource index
  714. * special file, which might have been updated if someone expanded the
  715. * filesystem (via gfs2_grow utility), which adds new resource groups.
  716. *
  717. * Returns: 0 on success, error code otherwise
  718. */
  719. int gfs2_rindex_update(struct gfs2_sbd *sdp)
  720. {
  721. struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
  722. struct gfs2_glock *gl = ip->i_gl;
  723. struct gfs2_holder ri_gh;
  724. int error = 0;
  725. int unlock_required = 0;
  726. /* Read new copy from disk if we don't have the latest */
  727. if (!sdp->sd_rindex_uptodate) {
  728. if (!gfs2_glock_is_locked_by_me(gl)) {
  729. error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
  730. if (error)
  731. return error;
  732. unlock_required = 1;
  733. }
  734. if (!sdp->sd_rindex_uptodate)
  735. error = gfs2_ri_update(ip);
  736. if (unlock_required)
  737. gfs2_glock_dq_uninit(&ri_gh);
  738. }
  739. return error;
  740. }
  741. static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
  742. {
  743. const struct gfs2_rgrp *str = buf;
  744. u32 rg_flags;
  745. rg_flags = be32_to_cpu(str->rg_flags);
  746. rg_flags &= ~GFS2_RDF_MASK;
  747. rgd->rd_flags &= GFS2_RDF_MASK;
  748. rgd->rd_flags |= rg_flags;
  749. rgd->rd_free = be32_to_cpu(str->rg_free);
  750. rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
  751. rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
  752. }
  753. static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
  754. {
  755. struct gfs2_rgrp *str = buf;
  756. str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
  757. str->rg_free = cpu_to_be32(rgd->rd_free);
  758. str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
  759. str->__pad = cpu_to_be32(0);
  760. str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
  761. memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
  762. }
  763. static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
  764. {
  765. struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
  766. struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
  767. if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
  768. rgl->rl_dinodes != str->rg_dinodes ||
  769. rgl->rl_igeneration != str->rg_igeneration)
  770. return 0;
  771. return 1;
  772. }
  773. static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
  774. {
  775. const struct gfs2_rgrp *str = buf;
  776. rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
  777. rgl->rl_flags = str->rg_flags;
  778. rgl->rl_free = str->rg_free;
  779. rgl->rl_dinodes = str->rg_dinodes;
  780. rgl->rl_igeneration = str->rg_igeneration;
  781. rgl->__pad = 0UL;
  782. }
  783. static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
  784. {
  785. struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
  786. u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
  787. rgl->rl_unlinked = cpu_to_be32(unlinked);
  788. }
  789. static u32 count_unlinked(struct gfs2_rgrpd *rgd)
  790. {
  791. struct gfs2_bitmap *bi;
  792. const u32 length = rgd->rd_length;
  793. const u8 *buffer = NULL;
  794. u32 i, goal, count = 0;
  795. for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
  796. goal = 0;
  797. buffer = bi->bi_bh->b_data + bi->bi_offset;
  798. WARN_ON(!buffer_uptodate(bi->bi_bh));
  799. while (goal < bi->bi_len * GFS2_NBBY) {
  800. goal = gfs2_bitfit(buffer, bi->bi_len, goal,
  801. GFS2_BLKST_UNLINKED);
  802. if (goal == BFITNOENT)
  803. break;
  804. count++;
  805. goal++;
  806. }
  807. }
  808. return count;
  809. }
  810. /**
  811. * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
  812. * @rgd: the struct gfs2_rgrpd describing the RG to read in
  813. *
  814. * Read in all of a Resource Group's header and bitmap blocks.
  815. * Caller must eventually call gfs2_rgrp_go_unlock() to free the bitmaps.
  816. *
  817. * Returns: errno
  818. */
  819. int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
  820. {
  821. struct gfs2_sbd *sdp = rgd->rd_sbd;
  822. struct gfs2_glock *gl = rgd->rd_gl;
  823. unsigned int length = rgd->rd_length;
  824. struct gfs2_bitmap *bi;
  825. unsigned int x, y;
  826. int error;
  827. if (rgd->rd_bits[0].bi_bh != NULL)
  828. return 0;
  829. for (x = 0; x < length; x++) {
  830. bi = rgd->rd_bits + x;
  831. error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
  832. if (error)
  833. goto fail;
  834. }
  835. for (y = length; y--;) {
  836. bi = rgd->rd_bits + y;
  837. error = gfs2_meta_wait(sdp, bi->bi_bh);
  838. if (error)
  839. goto fail;
  840. if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
  841. GFS2_METATYPE_RG)) {
  842. error = -EIO;
  843. goto fail;
  844. }
  845. }
  846. if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
  847. for (x = 0; x < length; x++)
  848. clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
  849. gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
  850. rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
  851. rgd->rd_free_clone = rgd->rd_free;
  852. }
  853. if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
  854. rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
  855. gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
  856. rgd->rd_bits[0].bi_bh->b_data);
  857. }
  858. else if (sdp->sd_args.ar_rgrplvb) {
  859. if (!gfs2_rgrp_lvb_valid(rgd)){
  860. gfs2_consist_rgrpd(rgd);
  861. error = -EIO;
  862. goto fail;
  863. }
  864. if (rgd->rd_rgl->rl_unlinked == 0)
  865. rgd->rd_flags &= ~GFS2_RDF_CHECK;
  866. }
  867. return 0;
  868. fail:
  869. while (x--) {
  870. bi = rgd->rd_bits + x;
  871. brelse(bi->bi_bh);
  872. bi->bi_bh = NULL;
  873. gfs2_assert_warn(sdp, !bi->bi_clone);
  874. }
  875. return error;
  876. }
  877. int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
  878. {
  879. u32 rl_flags;
  880. if (rgd->rd_flags & GFS2_RDF_UPTODATE)
  881. return 0;
  882. if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
  883. return gfs2_rgrp_bh_get(rgd);
  884. rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
  885. rl_flags &= ~GFS2_RDF_MASK;
  886. rgd->rd_flags &= GFS2_RDF_MASK;
  887. rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
  888. if (rgd->rd_rgl->rl_unlinked == 0)
  889. rgd->rd_flags &= ~GFS2_RDF_CHECK;
  890. rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
  891. rgd->rd_free_clone = rgd->rd_free;
  892. rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
  893. rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
  894. return 0;
  895. }
  896. int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
  897. {
  898. struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
  899. struct gfs2_sbd *sdp = rgd->rd_sbd;
  900. if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
  901. return 0;
  902. return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
  903. }
  904. /**
  905. * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
  906. * @gh: The glock holder for the resource group
  907. *
  908. */
  909. void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
  910. {
  911. struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
  912. int x, length = rgd->rd_length;
  913. for (x = 0; x < length; x++) {
  914. struct gfs2_bitmap *bi = rgd->rd_bits + x;
  915. if (bi->bi_bh) {
  916. brelse(bi->bi_bh);
  917. bi->bi_bh = NULL;
  918. }
  919. }
  920. }
  921. int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
  922. struct buffer_head *bh,
  923. const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
  924. {
  925. struct super_block *sb = sdp->sd_vfs;
  926. struct block_device *bdev = sb->s_bdev;
  927. const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
  928. bdev_logical_block_size(sb->s_bdev);
  929. u64 blk;
  930. sector_t start = 0;
  931. sector_t nr_sects = 0;
  932. int rv;
  933. unsigned int x;
  934. u32 trimmed = 0;
  935. u8 diff;
  936. for (x = 0; x < bi->bi_len; x++) {
  937. const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
  938. clone += bi->bi_offset;
  939. clone += x;
  940. if (bh) {
  941. const u8 *orig = bh->b_data + bi->bi_offset + x;
  942. diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
  943. } else {
  944. diff = ~(*clone | (*clone >> 1));
  945. }
  946. diff &= 0x55;
  947. if (diff == 0)
  948. continue;
  949. blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
  950. blk *= sects_per_blk; /* convert to sectors */
  951. while(diff) {
  952. if (diff & 1) {
  953. if (nr_sects == 0)
  954. goto start_new_extent;
  955. if ((start + nr_sects) != blk) {
  956. if (nr_sects >= minlen) {
  957. rv = blkdev_issue_discard(bdev,
  958. start, nr_sects,
  959. GFP_NOFS, 0);
  960. if (rv)
  961. goto fail;
  962. trimmed += nr_sects;
  963. }
  964. nr_sects = 0;
  965. start_new_extent:
  966. start = blk;
  967. }
  968. nr_sects += sects_per_blk;
  969. }
  970. diff >>= 2;
  971. blk += sects_per_blk;
  972. }
  973. }
  974. if (nr_sects >= minlen) {
  975. rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
  976. if (rv)
  977. goto fail;
  978. trimmed += nr_sects;
  979. }
  980. if (ptrimmed)
  981. *ptrimmed = trimmed;
  982. return 0;
  983. fail:
  984. if (sdp->sd_args.ar_discard)
  985. fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
  986. sdp->sd_args.ar_discard = 0;
  987. return -EIO;
  988. }
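/*
 * The per-byte bit trick above: for any bitmap byte x, (x | (x >> 1))
 * masked with 0x55 has the low bit of a pair set whenever that block is in
 * a non-free state, so ~(x | (x >> 1)) & 0x55 marks the free blocks.  When
 * a bh is supplied, diff therefore selects blocks that are free in the
 * current bitmap but still in use in the clone, i.e. (roughly) the blocks
 * freed since the clone was taken; without a bh it selects every free
 * block, which is what FITRIM wants.  For example, orig 0x00 (all free)
 * and clone 0x44 (blocks 1 and 3 in use) give
 * diff = ~(0x00 | 0x00) & (0x44 | 0x22) & 0x55 = 0x44.
 */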
  989. /**
  990. * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
  991. * @filp: Any file on the filesystem
  992. * @argp: Pointer to the arguments (also used to pass result)
  993. *
  994. * Returns: 0 on success, otherwise error code
  995. */
  996. int gfs2_fitrim(struct file *filp, void __user *argp)
  997. {
  998. struct inode *inode = filp->f_dentry->d_inode;
  999. struct gfs2_sbd *sdp = GFS2_SB(inode);
  1000. struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
  1001. struct buffer_head *bh;
  1002. struct gfs2_rgrpd *rgd;
  1003. struct gfs2_rgrpd *rgd_end;
  1004. struct gfs2_holder gh;
  1005. struct fstrim_range r;
  1006. int ret = 0;
  1007. u64 amt;
  1008. u64 trimmed = 0;
  1009. unsigned int x;
  1010. if (!capable(CAP_SYS_ADMIN))
  1011. return -EPERM;
  1012. if (!blk_queue_discard(q))
  1013. return -EOPNOTSUPP;
  1014. if (argp == NULL) {
  1015. r.start = 0;
  1016. r.len = ULLONG_MAX;
  1017. r.minlen = 0;
  1018. } else if (copy_from_user(&r, argp, sizeof(r)))
  1019. return -EFAULT;
  1020. ret = gfs2_rindex_update(sdp);
  1021. if (ret)
  1022. return ret;
  1023. rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
  1024. rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);
  1025. while (1) {
  1026. ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
  1027. if (ret)
  1028. goto out;
  1029. if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
  1030. /* Trim each bitmap in the rgrp */
  1031. for (x = 0; x < rgd->rd_length; x++) {
  1032. struct gfs2_bitmap *bi = rgd->rd_bits + x;
  1033. ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, r.minlen, &amt);
  1034. if (ret) {
  1035. gfs2_glock_dq_uninit(&gh);
  1036. goto out;
  1037. }
  1038. trimmed += amt;
  1039. }
  1040. /* Mark rgrp as having been trimmed */
  1041. ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
  1042. if (ret == 0) {
  1043. bh = rgd->rd_bits[0].bi_bh;
  1044. rgd->rd_flags |= GFS2_RGF_TRIMMED;
  1045. gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
  1046. gfs2_rgrp_out(rgd, bh->b_data);
  1047. gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
  1048. gfs2_trans_end(sdp);
  1049. }
  1050. }
  1051. gfs2_glock_dq_uninit(&gh);
  1052. if (rgd == rgd_end)
  1053. break;
  1054. rgd = gfs2_rgrpd_get_next(rgd);
  1055. }
  1056. out:
  1057. r.len = trimmed << 9;
  1058. if (argp && copy_to_user(argp, &r, sizeof(r)))
  1059. return -EFAULT;
  1060. return ret;
  1061. }
  1062. /**
  1063. * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
  1064. * @bi: the bitmap with the blocks
  1065. * @ip: the inode structure
  1066. * @biblk: the 32-bit block number relative to the start of the bitmap
  1067. * @amount: the number of blocks to reserve
  1068. *
  1069. * Returns: NULL - reservation was already taken, so not inserted
  1070. * pointer to the inserted reservation
  1071. */
  1072. static struct gfs2_blkreserv *rs_insert(struct gfs2_bitmap *bi,
  1073. struct gfs2_inode *ip, u32 biblk,
  1074. int amount)
  1075. {
  1076. struct rb_node **newn, *parent = NULL;
  1077. int rc;
  1078. struct gfs2_blkreserv *rs = ip->i_res;
  1079. struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
  1080. u64 fsblock = gfs2_bi2rgd_blk(bi, biblk) + rgd->rd_data0;
  1081. spin_lock(&rgd->rd_rsspin);
  1082. newn = &rgd->rd_rstree.rb_node;
  1083. BUG_ON(!ip->i_res);
  1084. BUG_ON(gfs2_rs_active(rs));
  1085. /* Figure out where to put new node */
  1086. /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/
  1087. while (*newn) {
  1088. struct gfs2_blkreserv *cur =
  1089. rb_entry(*newn, struct gfs2_blkreserv, rs_node);
  1090. parent = *newn;
  1091. rc = rs_cmp(fsblock, amount, cur);
  1092. if (rc > 0)
  1093. newn = &((*newn)->rb_right);
  1094. else if (rc < 0)
  1095. newn = &((*newn)->rb_left);
  1096. else {
  1097. spin_unlock(&rgd->rd_rsspin);
  1098. return NULL; /* reservation already in use */
  1099. }
  1100. }
  1101. /* Do our reservation work */
  1102. rs = ip->i_res;
  1103. rs->rs_free = amount;
  1104. rs->rs_rbm.offset = biblk;
  1105. rs->rs_rbm.bi = bi;
  1106. rb_link_node(&rs->rs_node, parent, newn);
  1107. rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
  1108. /* Do our rgrp accounting for the reservation */
  1109. rgd->rd_reserved += amount; /* blocks reserved */
  1110. rgd->rd_rs_cnt++; /* number of in-tree reservations */
  1111. spin_unlock(&rgd->rd_rsspin);
  1112. trace_gfs2_rs(ip, rs, TRACE_RS_INSERT);
  1113. return rs;
  1114. }
  1115. /**
  1116. * unclaimed_blocks - return number of blocks that aren't spoken for
  1117. */
  1118. static u32 unclaimed_blocks(struct gfs2_rgrpd *rgd)
  1119. {
  1120. return rgd->rd_free_clone - rgd->rd_reserved;
  1121. }
  1122. /**
  1123. * rg_mblk_search - find a group of multiple free blocks
  1124. * @rgd: the resource group descriptor
  1125. * @rs: the block reservation
  1126. * @ip: pointer to the inode for which we're reserving blocks
  1127. *
  1128. * This is very similar to rgblk_search, except we're looking for whole
  1129. * 64-bit words that represent a chunk of 32 free blocks. I'm only focusing
  1130. * on aligned dwords for speed's sake.
  1131. *
  1132. * Returns: 0 if successful or BFITNOENT if there isn't enough free space
  1133. */
  1134. static int rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, unsigned requested)
  1135. {
  1136. struct gfs2_bitmap *bi = rgd->rd_bits;
  1137. const u32 length = rgd->rd_length;
  1138. u32 blk;
  1139. unsigned int buf, x, search_bytes;
  1140. u8 *buffer = NULL;
  1141. u8 *ptr, *end, *nonzero;
  1142. u32 goal, rsv_bytes;
  1143. struct gfs2_blkreserv *rs;
  1144. u32 best_rs_bytes, unclaimed;
  1145. int best_rs_blocks;
  1146. /* Find bitmap block that contains bits for goal block */
  1147. if (rgrp_contains_block(rgd, ip->i_goal))
  1148. goal = ip->i_goal - rgd->rd_data0;
  1149. else
  1150. goal = rgd->rd_last_alloc;
  1151. for (buf = 0; buf < length; buf++) {
  1152. bi = rgd->rd_bits + buf;
  1153. /* Convert scope of "goal" from rgrp-wide to within
  1154. found bit block */
  1155. if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
  1156. goal -= bi->bi_start * GFS2_NBBY;
  1157. goto do_search;
  1158. }
  1159. }
  1160. buf = 0;
  1161. goal = 0;
  1162. do_search:
  1163. best_rs_blocks = max_t(int, atomic_read(&ip->i_res->rs_sizehint),
  1164. (RGRP_RSRV_MINBLKS * rgd->rd_length));
  1165. best_rs_bytes = (best_rs_blocks *
  1166. (1 + (RSRV_CONTENTION_FACTOR * rgd->rd_rs_cnt))) /
  1167. GFS2_NBBY; /* 1 + is for our not-yet-created reservation */
  1168. best_rs_bytes = ALIGN(best_rs_bytes, sizeof(u64));
  1169. unclaimed = unclaimed_blocks(rgd);
  1170. if (best_rs_bytes * GFS2_NBBY > unclaimed)
  1171. best_rs_bytes = unclaimed >> GFS2_BIT_SIZE;
  1172. for (x = 0; x <= length; x++) {
  1173. bi = rgd->rd_bits + buf;
  1174. if (test_bit(GBF_FULL, &bi->bi_flags))
  1175. goto skip;
  1176. WARN_ON(!buffer_uptodate(bi->bi_bh));
  1177. if (bi->bi_clone)
  1178. buffer = bi->bi_clone + bi->bi_offset;
  1179. else
  1180. buffer = bi->bi_bh->b_data + bi->bi_offset;
  1181. /* We have to keep the reservations aligned on u64 boundaries
  1182. otherwise we could get situations where a byte can't be
  1183. used because it's after a reservation, but a free bit still
  1184. is within the reservation's area. */
  1185. ptr = buffer + ALIGN(goal >> GFS2_BIT_SIZE, sizeof(u64));
  1186. end = (buffer + bi->bi_len);
  1187. while (ptr < end) {
  1188. rsv_bytes = 0;
  1189. if ((ptr + best_rs_bytes) <= end)
  1190. search_bytes = best_rs_bytes;
  1191. else
  1192. search_bytes = end - ptr;
  1193. BUG_ON(!search_bytes);
  1194. nonzero = memchr_inv(ptr, 0, search_bytes);
  1195. /* If the lot is all zeroes, reserve the whole size. If
  1196. there's enough zeroes to satisfy the request, use
  1197. what we can. If there's not enough, keep looking. */
  1198. if (nonzero == NULL)
  1199. rsv_bytes = search_bytes;
  1200. else if ((nonzero - ptr) * GFS2_NBBY >= requested)
  1201. rsv_bytes = (nonzero - ptr);
  1202. if (rsv_bytes) {
  1203. blk = ((ptr - buffer) * GFS2_NBBY);
  1204. BUG_ON(blk >= bi->bi_len * GFS2_NBBY);
  1205. rs = rs_insert(bi, ip, blk,
  1206. rsv_bytes * GFS2_NBBY);
  1207. if (IS_ERR(rs))
  1208. return PTR_ERR(rs);
  1209. if (rs)
  1210. return 0;
  1211. }
  1212. ptr += ALIGN(search_bytes, sizeof(u64));
  1213. }
  1214. skip:
  1215. /* Try next bitmap block (wrap back to rgrp header
  1216. if at end) */
  1217. buf++;
  1218. buf %= length;
  1219. goal = 0;
  1220. }
  1221. return BFITNOENT;
  1222. }
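/*
 * Sizing example for the search above (numbers illustrative only): with a
 * size hint of 1024 blocks, no other reservations in the rgrp, and the
 * hint dominating the per-rgrp minimum, the target window is
 * 1024 / GFS2_NBBY = 256 bitmap bytes of zeroes.  Each reservation already
 * in the tree inflates that target by RSRV_CONTENTION_FACTOR, and the
 * result is aligned to a u64 and clamped so it never asks for more than
 * the rgrp's unclaimed blocks.  memchr_inv() then finds the first non-zero
 * byte in each candidate window; an all-zero window, or one with at least
 * @requested free blocks before the first non-zero byte, becomes the new
 * reservation.
 */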
  1223. /**
  1224. * try_rgrp_fit - See if a given reservation will fit in a given RG
  1225. * @rgd: the RG data
  1226. * @ip: the inode
  1227. *
  1228. * If there's room for the requested blocks to be allocated from the RG:
  1229. * This will try to get a multi-block reservation first, and if that doesn't
  1230. * fit, it will take what it can.
  1231. *
  1232. * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
  1233. */
  1234. static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
  1235. unsigned requested)
  1236. {
  1237. if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
  1238. return 0;
  1239. /* Look for a multi-block reservation. */
  1240. if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS &&
  1241. rg_mblk_search(rgd, ip, requested) != BFITNOENT)
  1242. return 1;
  1243. if (unclaimed_blocks(rgd) >= requested)
  1244. return 1;
  1245. return 0;
  1246. }
  1247. /**
  1248. * gfs2_next_unreserved_block - Return next block that is not reserved
  1249. * @rgd: The resource group
  1250. * @block: The starting block
  1251. * @ip: Ignore any reservations for this inode
  1252. *
  1253. * If the block does not appear in any reservation, then return the
  1254. * block number unchanged. If it does appear in the reservation, then
  1255. * keep looking through the tree of reservations in order to find the
  1256. * first block number which is not reserved.
  1257. */
  1258. static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
  1259. const struct gfs2_inode *ip)
  1260. {
  1261. struct gfs2_blkreserv *rs;
  1262. struct rb_node *n;
  1263. int rc;
  1264. spin_lock(&rgd->rd_rsspin);
  1265. n = rb_first(&rgd->rd_rstree);
  1266. while (n) {
  1267. rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
  1268. rc = rs_cmp(block, 1, rs);
  1269. if (rc < 0)
  1270. n = n->rb_left;
  1271. else if (rc > 0)
  1272. n = n->rb_right;
  1273. else
  1274. break;
  1275. }
  1276. if (n) {
  1277. while ((rs_cmp(block, 1, rs) == 0) && (ip->i_res != rs)) {
  1278. block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
  1279. n = rb_next(&rs->rs_node);
  1280. if (n == NULL)
  1281. break;
  1282. rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
  1283. }
  1284. }
  1285. spin_unlock(&rgd->rd_rsspin);
  1286. return block;
  1287. }
  1288. /**
  1289. * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
  1290. * @rbm: The rbm with rgd already set correctly
  1291. * @block: The block number (filesystem relative)
  1292. *
  1293. * This sets the bi and offset members of an rbm based on a
  1294. * resource group and a filesystem relative block number. The
  1295. * resource group must be set in the rbm on entry, the bi and
  1296. * offset members will be set by this function.
  1297. *
  1298. * Returns: 0 on success, or an error code
  1299. */
  1300. static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
  1301. {
  1302. u64 rblock = block - rbm->rgd->rd_data0;
  1303. u32 goal = (u32)rblock;
  1304. int x;
  1305. if (WARN_ON_ONCE(rblock > UINT_MAX))
  1306. return -EINVAL;
  1307. for (x = 0; x < rbm->rgd->rd_length; x++) {
  1308. rbm->bi = rbm->rgd->rd_bits + x;
  1309. if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) {
  1310. rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY);
  1311. return 0;
  1312. }
  1313. }
  1314. return -E2BIG;
  1315. }
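/*
 * Example, using the 4KiB bitmap layout sketched earlier: in an rgrp whose
 * first data block (rd_data0) is 65536, filesystem block 82000 gives
 * goal = 16464.  The header bitmap only covers blocks 0..15871, so the
 * loop settles on the second bitmap and rbm->offset becomes
 * 16464 - 15872 = 592, i.e. the block's bit pair lies 148 bytes into that
 * bitmap's data.
 */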
  1316. /**
  1317. * gfs2_reservation_check_and_update - Check for reservations during block alloc
  1318. * @rbm: The current position in the resource group
  1319. *
  1320. * This checks the current position in the rgrp to see whether there is
  1321. * a reservation covering this block. If not then this function is a
  1322. * no-op. If there is, then the position is moved to the end of the
  1323. * contiguous reservation(s) so that we are pointing at the first
  1324. * non-reserved block.
  1325. *
  1326. * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
  1327. */
  1328. static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
  1329. const struct gfs2_inode *ip)
  1330. {
  1331. u64 block = gfs2_rbm_to_block(rbm);
  1332. u64 nblock;
  1333. int ret;
  1334. nblock = gfs2_next_unreserved_block(rbm->rgd, block, ip);
  1335. if (nblock == block)
  1336. return 0;
  1337. ret = gfs2_rbm_from_block(rbm, nblock);
  1338. if (ret < 0)
  1339. return ret;
  1340. return 1;
  1341. }
  1342. /**
  1343. * gfs2_rbm_find - Look for blocks of a particular state
  1344. * @rbm: Value/result starting position and final position
  1345. * @state: The state which we want to find
  1346. * @ip: If set, check for reservations
  1347. * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
  1348. * around until we've reached the starting point.
  1349. *
  1350. * Side effects:
  1351. * - If looking for free blocks, we set GBF_FULL on each bitmap which
  1352. * has no free blocks in it.
  1353. *
  1354. * Returns: 0 on success, -ENOSPC if there is no block of the requested state
  1355. */
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state,
			 const struct gfs2_inode *ip, bool nowrap)
{
	struct buffer_head *bh;
	struct gfs2_bitmap *initial_bi;
	u32 initial_offset;
	u32 offset;
	u8 *buffer;
	int index;
	int n = 0;
	int iters = rbm->rgd->rd_length;
	int ret;

	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	 */
	if (rbm->offset != 0)
		iters++;

	while(1) {
		if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = rbm->bi->bi_bh;
		buffer = bh->b_data + rbm->bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
			buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
find_next:
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
			goto bitmap_full;
		rbm->offset = offset;
		if (ip == NULL)
			return 0;

		initial_bi = rbm->bi;
		ret = gfs2_reservation_check_and_update(rbm, ip);
		if (ret == 0)
			return 0;
		if (ret > 0) {
			n += (rbm->bi - initial_bi);
			goto find_next;
		}
		return ret;

bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &rbm->bi->bi_flags);

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		index = rbm->bi - rbm->rgd->rd_bits;
		index++;
		if (index == rbm->rgd->rd_length)
			index = 0;
		rbm->bi = &rbm->rgd->rd_bits[index];
		if ((index == 0) && nowrap)
			break;
		n++;
		if (n >= iters)
			break;
	}

	return -ENOSPC;
}
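
/*
 * Typical usage (a rough sketch, mirroring gfs2_alloc_blocks() below):
 * set rbm.rgd to the resource group, position bi/offset with
 * gfs2_rbm_from_block() using the goal block, then call gfs2_rbm_find()
 * with the wanted state.  On success the rbm points at a matching block,
 * which can be converted back with gfs2_rbm_to_block().
 */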

/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Any unlinked dinodes found are handed to the delete workqueue for
 * disposal; nothing is returned to the caller.
 */
static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };

	while (1) {
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}

/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @requested: the number of blocks to be reserved
 *
 * Returns: errno
 */
int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = ip->i_res;
	int error = 0, rg_locked, flags = LM_FLAG_TRY;
	u64 last_unlinked = NO_BLOCK;
	int loops = 0;

	if (sdp->sd_args.ar_rgrplvb)
		flags |= GL_SKIP;
	if (gfs2_assert_warn(sdp, requested)) {
		error = -EINVAL;
		goto out;
	}
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
		flags = 0; /* Yoda: Do or do not. There is no try */
	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
		rs->rs_rbm.rgd = begin = ip->i_rgd;
	} else {
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	}
	if (rs->rs_rbm.rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 0;

		if (gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 1;
			error = 0;
		} else if (!loops && !gfs2_rs_active(rs) &&
			   rs->rs_rbm.rgd->rd_rs_cnt > RGRP_RSRV_MAX_CONTENDERS) {
			/* If the rgrp already is maxed out for contenders,
			   we can eliminate it as a "first pass" without even
			   requesting the rgrp glock. */
			error = GLR_TRYFAILED;
		} else {
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &rs->rs_rgd_gh);
			if (!error && sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (error) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
					return error;
				}
			}
		}
		switch (error) {
		case 0:
			if (gfs2_rs_active(rs)) {
				if (unclaimed_blocks(rs->rs_rbm.rgd) +
				    rs->rs_free >= requested) {
					ip->i_rgd = rs->rs_rbm.rgd;
					return 0;
				}
				/* We have a multi-block reservation, but the
				   rgrp doesn't have enough free blocks to
				   satisfy the request. Free the reservation
				   and look for a suitable rgrp. */
				gfs2_rs_deltree(ip, rs);
			}
			if (try_rgrp_fit(rs->rs_rbm.rgd, ip, requested)) {
				if (sdp->sd_args.ar_rgrplvb)
					gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
				ip->i_rgd = rs->rs_rbm.rgd;
				return 0;
			}
			if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK) {
				if (sdp->sd_args.ar_rgrplvb)
					gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
				try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
						ip->i_no_addr);
			}
			if (!rg_locked)
				gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
			/* fall through */
		case GLR_TRYFAILED:
			rs->rs_rbm.rgd = gfs2_rgrpd_get_next(rs->rs_rbm.rgd);
			rs->rs_rbm.rgd = rs->rs_rbm.rgd ? : begin; /* if NULL, wrap */
			if (rs->rs_rbm.rgd != begin) /* If we didn't wrap */
				break;

			flags &= ~LM_FLAG_TRY;
			loops++;
			/* Check that fs hasn't grown if writing to rindex */
			if (ip == GFS2_I(sdp->sd_rindex) &&
			    !sdp->sd_rindex_uptodate) {
				error = gfs2_ri_update(ip);
				if (error)
					goto out;
			} else if (loops == 2)
				/* Flushing the log may release space */
				gfs2_log_flush(sdp, NULL);
			break;
		default:
			goto out;
		}
	}
	error = -ENOSPC;

out:
	return error;
}
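
/*
 * Summary of the retry strategy above (descriptive note): when no
 * reservation is already active, the first pass walks the rgrps with
 * LM_FLAG_TRY so a contended rgrp is simply skipped.  Once every rgrp
 * has been tried and we wrap back to @begin, the try flag is dropped and
 * we block on the glocks instead.  On the final pass the log is flushed,
 * since that may free up space, before giving up with -ENOSPC.
 */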

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs = ip->i_res;

	if (rs->rs_rgd_gh.gh_gl)
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}
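
/*
 * Rough usage sketch (assumed caller pattern, not a verbatim call site):
 *
 *	error = gfs2_inplace_reserve(ip, blocks_wanted);
 *	if (!error) {
 *		... start a transaction, then ...
 *		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 *		...
 *		gfs2_inplace_release(ip);
 *	}
 */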

/**
 * gfs2_get_block_type - Check a block in a RG is of given type
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */
static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	int ret;

	ret = gfs2_rbm_from_block(&rbm, block);
	WARN_ON_ONCE(ret != 0);

	return gfs2_testbit(rgd, rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
			    rbm.bi->bi_len, rbm.offset);
}

/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the resource group information
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The requested extent length (value/result: set to the length allocated)
 *
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to the new allocation state (dinode or used) and
 * extend the extent over any following free blocks, up to the requested
 * length.
 *
 * Returns: starting block number of the extent (fs scope)
 */
static u64 gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
			     unsigned int *n)
{
	struct gfs2_rgrpd *rgd = rbm->rgd;
	struct gfs2_bitmap *bi = rbm->bi;
	u32 blk = rbm->offset;
	const unsigned int elen = *n;
	u32 goal;
	const u8 *buffer = NULL;

	*n = 0;
	buffer = bi->bi_bh->b_data + bi->bi_offset;
	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
	gfs2_setbit(rgd, bi->bi_clone, bi, blk,
		    dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	(*n)++;
	goal = blk;
	while (*n < elen) {
		goal++;
		if (goal >= (bi->bi_len * GFS2_NBBY))
			break;
		if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
		    GFS2_BLKST_FREE)
			break;
		gfs2_setbit(rgd, bi->bi_clone, bi, goal, GFS2_BLKST_USED);
		(*n)++;
	}
	blk = gfs2_bi2rgd_blk(bi, blk);
	rgd->rd_last_alloc = blk + *n - 1;
	return rgd->rd_data0 + blk;
}
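
/*
 * Note on block numbering (descriptive): gfs2_bi2rgd_blk() converts the
 * bitmap-relative offset into an rgrp-relative block, so rd_last_alloc is
 * kept rgrp-relative, while the value returned to the caller is made
 * filesystem-relative by adding rd_data0.
 */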

/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */
static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rbm rbm;

	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
	if (!rbm.rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	while (blen--) {
		gfs2_rbm_from_block(&rbm, bstart);
		bstart++;
		if (!rbm.bi->bi_clone) {
			rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
			       rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
			       rbm.bi->bi_len);
		}
		gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.bi->bi_bh, 1);
		gfs2_setbit(rbm.rgd, NULL, rbm.bi, rbm.offset, new_state);
	}

	return rbm.rgd;
}
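
/*
 * Descriptive note: bi_clone keeps a copy of the bitmap taken before any
 * blocks were freed in the current transaction.  gfs2_rbm_find() above
 * searches the clone (when present) for every state except
 * GFS2_BLKST_UNLINKED, so blocks freed here are not handed out again
 * until the clone is brought back in sync elsewhere in the code.
 */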

/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 *
 */
int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;

	if (rgd == NULL)
		return 0;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
		       rgd->rd_reserved);
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		dump_rs(seq, trs);
	}
	spin_unlock(&rgd->rd_rsspin);
	return 0;
}

static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}

/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
 */
static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
			if (rs->rs_free && !ret)
				goto out;
		}
		__rs_deltree(ip, rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}

/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	unsigned int ndata;
	u64 goal;
	u64 block; /* block, within the file system scope */
	int error;

	if (gfs2_rs_active(ip->i_res))
		goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
	else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;

	gfs2_rbm_from_block(&rbm, goal);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, ip, false);

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "error=%d, nblocks=%u, full=%d\n", error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
		goto rgrp_error;
	}

	block = gfs2_alloc_extent(&rbm, dinode, nblocks);
	if (gfs2_rs_active(ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		printk(KERN_WARNING "nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, 1);

	/*
	 * This needs reviewing to see why we cannot do the quota change
	 * at this point in the dinode case.
	 */
	if (ndata)
		gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
				  ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
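
/*
 * Flow summary for gfs2_alloc_blocks() (descriptive note): pick a goal
 * (active reservation, the inode's i_goal, or the rgrp's last allocation),
 * locate a free run with gfs2_rbm_find(), mark it with gfs2_alloc_extent(),
 * trim or drop any multi-block reservation, then update the rgrp header,
 * statfs and quota accounting within the current transaction.
 */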

/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 */
void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */
void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}

void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, 1);
}

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
}

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}

/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */
int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	if (gfs2_get_block_type(rgd, no_addr) != type)
		error = -ESTALE;

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}

/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */
void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
		rgd = ip->i_rgd;
	else
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
		return;
	}
	ip->i_rgd = rgd;

	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *                    and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 *
 */
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, 0,
				 &rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */
void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;
	}
}
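
/*
 * Rough usage sketch for the rlist helpers (assumed caller pattern): call
 * gfs2_rlist_add() once per block of interest, gfs2_rlist_alloc() to set
 * up one holder per resource group, enqueue those holders, do the work,
 * drop the locks, and finish with gfs2_rlist_free().
 */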