rgrp.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "log.h"
#include "inode.h"
#include "trace_gfs2.h"

#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)

#define RSRV_CONTENTION_FACTOR 4
#define RGRP_RSRV_MAX_CONTENDERS 2

#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif

/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */

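/*
 * Illustrative example of the encoding above: block n within a byte
 * occupies bits 2n and 2n+1, low-order block first. So the byte 0xe4
 * (binary 11 10 01 00) encodes, in order:
 *   block 0 -> 0 (free), block 1 -> 1 (used data),
 *   block 2 -> 2 (unlinked inode), block 3 -> 3 (used metadata).
 */
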
static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rgd: the resource group descriptor
 * @buf2: the clone buffer that holds the bitmaps
 * @bi: the bitmap structure
 * @block: the block to set
 * @new_state: the new state of the block
 *
 */

static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2,
                               struct gfs2_bitmap *bi, u32 block,
                               unsigned char new_state)
{
        unsigned char *byte1, *byte2, *end, cur_state;
        unsigned int buflen = bi->bi_len;
        const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;

        byte1 = bi->bi_bh->b_data + bi->bi_offset + (block / GFS2_NBBY);
        end = bi->bi_bh->b_data + bi->bi_offset + buflen;

        BUG_ON(byte1 >= end);

        cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

        if (unlikely(!valid_change[new_state * 4 + cur_state])) {
                printk(KERN_WARNING "GFS2: buf_blk = 0x%llx old_state=%d, "
                       "new_state=%d\n",
                       (unsigned long long)block, cur_state, new_state);
                printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%lx\n",
                       (unsigned long long)rgd->rd_addr,
                       (unsigned long)bi->bi_start);
                printk(KERN_WARNING "GFS2: bi_offset=0x%lx bi_len=0x%lx\n",
                       (unsigned long)bi->bi_offset,
                       (unsigned long)bi->bi_len);
                dump_stack();
                gfs2_consist_rgrpd(rgd);
                return;
        }
        *byte1 ^= (cur_state ^ new_state) << bit;

        if (buf2) {
                byte2 = buf2 + bi->bi_offset + (block / GFS2_NBBY);
                cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
                *byte2 ^= (cur_state ^ new_state) << bit;
        }
}

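/*
 * A note on the update above: since only the two bits selected by "bit"
 * can differ between cur_state and new_state,
 *   *byte1 ^= (cur_state ^ new_state) << bit;
 * rewrites exactly that bit-pair and leaves the other three block
 * entries in the byte untouched. For example, going from state 1 (01)
 * to state 3 (11) XORs the pair with 10, turning 01 into 11 in place.
 */
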
/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @block: the block to read
 *
 */

static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
                                         const unsigned char *buffer,
                                         unsigned int buflen, u32 block)
{
        const unsigned char *byte, *end;
        unsigned char cur_state;
        unsigned int bit;

        byte = buffer + (block / GFS2_NBBY);
        bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
        end = buffer + buflen;

        gfs2_assert(rgd->rd_sbd, byte < end);

        cur_state = (*byte >> bit) & GFS2_BIT_MASK;

        return cur_state;
}

/**
 * gfs2_bit_search
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for. This gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0, 2, 4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */

static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
{
        u64 tmp;
        static const u64 search[] = {
                [0] = 0xffffffffffffffffULL,
                [1] = 0xaaaaaaaaaaaaaaaaULL,
                [2] = 0x5555555555555555ULL,
                [3] = 0x0000000000000000ULL,
        };
        tmp = le64_to_cpu(*ptr) ^ search[state];
        tmp &= (tmp >> 1);
        tmp &= mask;
        return tmp;
}

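/*
 * Worked example of the search above, looking for state 1
 * (GFS2_BLKST_USED): search[1] = 0xaaaa... so every 2-bit entry equal
 * to 01 XORs to 11, while any other state leaves at least one zero bit
 * in its pair. "tmp &= (tmp >> 1)" then leaves a 1 only at the even
 * bit of each all-ones pair, and the 0x5555... mask keeps just those
 * even bits: one set bit per matching block.
 */
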
/**
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 *
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
 */
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
{
        u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);

        if (blk >= startblk + rs->rs_free)
                return 1;
        if (blk + len - 1 < startblk)
                return -1;
        return 0;
}

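/*
 * For example, with a reservation starting at block 100 and
 * rs_free = 20 (i.e. blocks 100..119): rs_cmp(120, 5, rs) returns 1
 * (entirely beyond), rs_cmp(90, 10, rs) returns -1 (ends at block 99,
 * entirely before), and rs_cmp(95, 10, rs) returns 0 (blocks 95..104
 * overlap the start of the reservation).
 */
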
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *       a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem. @buf will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
                       u32 goal, u8 state)
{
        u32 spoint = (goal << 1) & ((8 * sizeof(u64)) - 1);
        const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
        const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
        u64 tmp;
        u64 mask = 0x5555555555555555ULL;
        u32 bit;

        /* Mask off bits we don't care about at the start of the search */
        mask <<= spoint;
        tmp = gfs2_bit_search(ptr, mask, state);
        ptr++;
        while (tmp == 0 && ptr < end) {
                tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
                ptr++;
        }
        /* Mask off any bits which are more than len bytes from the start */
        if (ptr == end && (len & (sizeof(u64) - 1)))
                tmp &= (((u64)~0) >> (64 - 8 * (len & (sizeof(u64) - 1))));
        /* Didn't find anything, so return */
        if (tmp == 0)
                return BFITNOENT;
        ptr--;
        bit = __ffs64(tmp);
        bit /= 2;       /* two bits per entry in the bitmap */
        return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}

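/*
 * Usage sketch: to find the first free block at or after bit-pair 10
 * in a bitmap buffer, a caller would do something like
 *   blk = gfs2_bitfit(buffer, buflen, 10, GFS2_BLKST_FREE);
 *   if (blk == BFITNOENT)
 *           ... no such block in this bitmap ...
 * The "goal >> 5" above skips whole u64 words (32 blocks each) and
 * "(goal << 1) & 63" is the starting bit within that word.
 */
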
/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
                         unsigned int buflen, u8 state)
{
        const u8 *byte = buffer;
        const u8 *end = buffer + buflen;
        const u8 state1 = state << 2;
        const u8 state2 = state << 4;
        const u8 state3 = state << 6;
        u32 count = 0;

        for (; byte < end; byte++) {
                if (((*byte) & 0x03) == state)
                        count++;
                if (((*byte) & 0x0C) == state1)
                        count++;
                if (((*byte) & 0x30) == state2)
                        count++;
                if (((*byte) & 0xC0) == state3)
                        count++;
        }

        return count;
}

/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_bitmap *bi = NULL;
        u32 length = rgd->rd_length;
        u32 count[4], tmp;
        int buf, x;

        memset(count, 0, 4 * sizeof(u32));

        /* Count # blocks in each of 4 possible allocation states */
        for (buf = 0; buf < length; buf++) {
                bi = rgd->rd_bits + buf;
                for (x = 0; x < 4; x++)
                        count[x] += gfs2_bitcount(rgd,
                                                  bi->bi_bh->b_data +
                                                  bi->bi_offset,
                                                  bi->bi_len, x);
        }

        if (count[0] != rgd->rd_free) {
                if (gfs2_consist_rgrpd(rgd))
                        fs_err(sdp, "free data mismatch: %u != %u\n",
                               count[0], rgd->rd_free);
                return;
        }

        tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
        if (count[1] != tmp) {
                if (gfs2_consist_rgrpd(rgd))
                        fs_err(sdp, "used data mismatch: %u != %u\n",
                               count[1], tmp);
                return;
        }

        if (count[2] + count[3] != rgd->rd_dinodes) {
                if (gfs2_consist_rgrpd(rgd))
                        fs_err(sdp, "used metadata mismatch: %u != %u\n",
                               count[2] + count[3], rgd->rd_dinodes);
                return;
        }
}

static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
{
        u64 first = rgd->rd_data0;
        u64 last = first + rgd->rd_data;
        return first <= block && block < last;
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
        struct rb_node *n, *next;
        struct gfs2_rgrpd *cur;

        spin_lock(&sdp->sd_rindex_spin);
        n = sdp->sd_rindex_tree.rb_node;
        while (n) {
                cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
                next = NULL;
                if (blk < cur->rd_addr)
                        next = n->rb_left;
                else if (blk >= cur->rd_data0 + cur->rd_data)
                        next = n->rb_right;
                if (next == NULL) {
                        spin_unlock(&sdp->sd_rindex_spin);
                        if (exact) {
                                if (blk < cur->rd_addr)
                                        return NULL;
                                if (blk >= cur->rd_data0 + cur->rd_data)
                                        return NULL;
                        }
                        return cur;
                }
                n = next;
        }
        spin_unlock(&sdp->sd_rindex_spin);

        return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
        const struct rb_node *n;
        struct gfs2_rgrpd *rgd;

        spin_lock(&sdp->sd_rindex_spin);
        n = rb_first(&sdp->sd_rindex_tree);
        rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
        spin_unlock(&sdp->sd_rindex_spin);

        return rgd;
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        const struct rb_node *n;

        spin_lock(&sdp->sd_rindex_spin);
        n = rb_next(&rgd->rd_node);
        if (n == NULL)
                n = rb_first(&sdp->sd_rindex_tree);

        if (unlikely(&rgd->rd_node == n)) {
                spin_unlock(&sdp->sd_rindex_spin);
                return NULL;
        }
        rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
        spin_unlock(&sdp->sd_rindex_spin);
        return rgd;
}

void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
        int x;

        for (x = 0; x < rgd->rd_length; x++) {
                struct gfs2_bitmap *bi = rgd->rd_bits + x;
                kfree(bi->bi_clone);
                bi->bi_clone = NULL;
        }
}

/**
 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
 * @ip: the inode for this reservation
 */
int gfs2_rs_alloc(struct gfs2_inode *ip)
{
        struct gfs2_blkreserv *res;

        if (ip->i_res)
                return 0;

        res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
        if (!res)
                return -ENOMEM; /* bail before touching res */

        RB_CLEAR_NODE(&res->rs_node);

        down_write(&ip->i_rw_mutex);
        if (ip->i_res)
                kmem_cache_free(gfs2_rsrv_cachep, res);
        else
                ip->i_res = res;
        up_write(&ip->i_rw_mutex);
        return 0;
}

static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs)
{
        gfs2_print_dbg(seq, " r: %llu s:%llu b:%u f:%u\n",
                       rs->rs_rbm.rgd->rd_addr, gfs2_rbm_to_block(&rs->rs_rbm),
                       rs->rs_rbm.offset, rs->rs_free);
}

/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @ip: the inode the reservation belongs to (may be NULL)
 * @rs: The reservation to remove
 *
 */
static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
{
        struct gfs2_rgrpd *rgd;

        if (!gfs2_rs_active(rs))
                return;

        rgd = rs->rs_rbm.rgd;
        trace_gfs2_rs(ip, rs, TRACE_RS_TREEDEL);
        rb_erase(&rs->rs_node, &rgd->rd_rstree);
        RB_CLEAR_NODE(&rs->rs_node);
        BUG_ON(!rgd->rd_rs_cnt);
        rgd->rd_rs_cnt--;

        if (rs->rs_free) {
                /* return reserved blocks to the rgrp and the ip */
                BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
                rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
                rs->rs_free = 0;
                clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
                smp_mb__after_clear_bit();
        }
}

/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @ip: the inode the reservation belongs to
 * @rs: The reservation to remove
 *
 */
void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
{
        struct gfs2_rgrpd *rgd;

        rgd = rs->rs_rbm.rgd;
        if (rgd) {
                spin_lock(&rgd->rd_rsspin);
                __rs_deltree(ip, rs);
                spin_unlock(&rgd->rd_rsspin);
        }
}

/**
 * gfs2_rs_delete - delete a multi-block reservation
 * @ip: The inode for this reservation
 *
 */
void gfs2_rs_delete(struct gfs2_inode *ip)
{
        down_write(&ip->i_rw_mutex);
        if (ip->i_res) {
                gfs2_rs_deltree(ip, ip->i_res);
                trace_gfs2_rs(ip, ip->i_res, TRACE_RS_DELETE);
                BUG_ON(ip->i_res->rs_free);
                kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
                ip->i_res = NULL;
        }
        up_write(&ip->i_rw_mutex);
}

/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
        struct rb_node *n;
        struct gfs2_blkreserv *rs;

        spin_lock(&rgd->rd_rsspin);
        while ((n = rb_first(&rgd->rd_rstree))) {
                rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
                __rs_deltree(NULL, rs);
        }
        spin_unlock(&rgd->rd_rsspin);
}

void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
        struct rb_node *n;
        struct gfs2_rgrpd *rgd;
        struct gfs2_glock *gl;

        while ((n = rb_first(&sdp->sd_rindex_tree))) {
                rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
                gl = rgd->rd_gl;

                rb_erase(n, &sdp->sd_rindex_tree);

                if (gl) {
                        spin_lock(&gl->gl_spin);
                        gl->gl_object = NULL;
                        spin_unlock(&gl->gl_spin);
                        gfs2_glock_add_to_lru(gl);
                        gfs2_glock_put(gl);
                }

                gfs2_free_clones(rgd);
                kfree(rgd->rd_bits);
                return_all_reservations(rgd);
                kmem_cache_free(gfs2_rgrpd_cachep, rgd);
        }
}

static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
        printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
        printk(KERN_INFO " ri_length = %u\n", rgd->rd_length);
        printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
        printk(KERN_INFO " ri_data = %u\n", rgd->rd_data);
        printk(KERN_INFO " ri_bitbytes = %u\n", rgd->rd_bitbytes);
}

/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_bitmap *bi;
        u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
        u32 bytes_left, bytes;
        int x;

        if (!length)
                return -EINVAL;

        rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
        if (!rgd->rd_bits)
                return -ENOMEM;

        bytes_left = rgd->rd_bitbytes;

        for (x = 0; x < length; x++) {
                bi = rgd->rd_bits + x;

                bi->bi_flags = 0;
                /* small rgrp; bitmap stored completely in header block */
                if (length == 1) {
                        bytes = bytes_left;
                        bi->bi_offset = sizeof(struct gfs2_rgrp);
                        bi->bi_start = 0;
                        bi->bi_len = bytes;
                /* header block */
                } else if (x == 0) {
                        bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
                        bi->bi_offset = sizeof(struct gfs2_rgrp);
                        bi->bi_start = 0;
                        bi->bi_len = bytes;
                /* last block */
                } else if (x + 1 == length) {
                        bytes = bytes_left;
                        bi->bi_offset = sizeof(struct gfs2_meta_header);
                        bi->bi_start = rgd->rd_bitbytes - bytes_left;
                        bi->bi_len = bytes;
                /* other blocks */
                } else {
                        bytes = sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_meta_header);
                        bi->bi_offset = sizeof(struct gfs2_meta_header);
                        bi->bi_start = rgd->rd_bitbytes - bytes_left;
                        bi->bi_len = bytes;
                }

                bytes_left -= bytes;
        }

        if (bytes_left) {
                gfs2_consist_rgrpd(rgd);
                return -EIO;
        }
        bi = rgd->rd_bits + (length - 1);
        if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
                if (gfs2_consist_rgrpd(rgd)) {
                        gfs2_rindex_print(rgd);
                        fs_err(sdp, "start=%u len=%u offset=%u\n",
                               bi->bi_start, bi->bi_len, bi->bi_offset);
                }
                return -EIO;
        }

        return 0;
}

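/*
 * Worked example (assuming a 4KiB block size): the on-disk
 * struct gfs2_rgrp is 128 bytes and struct gfs2_meta_header is 24
 * bytes, so the rgrp header block holds 4096 - 128 = 3968 bitmap bytes
 * (15872 blocks) and each continuation block holds 4096 - 24 = 4072
 * bytes (16288 blocks). A resource group of, say, 60000 blocks has
 * rd_bitbytes = 15000 and needs length = 4 bitmap descriptors:
 * 3968 + 4072 + 4072 + 2888 bytes.
 */
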
/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
        u64 total_data = 0;
        struct inode *inode = sdp->sd_rindex;
        struct gfs2_inode *ip = GFS2_I(inode);
        char buf[sizeof(struct gfs2_rindex)];
        int error, rgrps;

        for (rgrps = 0;; rgrps++) {
                loff_t pos = rgrps * sizeof(struct gfs2_rindex);

                if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
                        break;
                error = gfs2_internal_read(ip, buf, &pos,
                                           sizeof(struct gfs2_rindex));
                if (error != sizeof(struct gfs2_rindex))
                        break;
                total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
        }
        return total_data;
}

static int rgd_insert(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

        /* Figure out where to put new node */
        while (*newn) {
                struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
                                                  rd_node);

                parent = *newn;
                if (rgd->rd_addr < cur->rd_addr)
                        newn = &((*newn)->rb_left);
                else if (rgd->rd_addr > cur->rd_addr)
                        newn = &((*newn)->rb_right);
                else
                        return -EEXIST;
        }

        rb_link_node(&rgd->rd_node, parent, newn);
        rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
        sdp->sd_rgrps++;
        return 0;
}

/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
        struct gfs2_rindex buf;
        int error;
        struct gfs2_rgrpd *rgd;

        if (pos >= i_size_read(&ip->i_inode))
                return 1;

        error = gfs2_internal_read(ip, (char *)&buf, &pos,
                                   sizeof(struct gfs2_rindex));

        if (error != sizeof(struct gfs2_rindex))
                return (error == 0) ? 1 : error;

        rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
        error = -ENOMEM;
        if (!rgd)
                return error;

        rgd->rd_sbd = sdp;
        rgd->rd_addr = be64_to_cpu(buf.ri_addr);
        rgd->rd_length = be32_to_cpu(buf.ri_length);
        rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
        rgd->rd_data = be32_to_cpu(buf.ri_data);
        rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
        spin_lock_init(&rgd->rd_rsspin);

        error = compute_bitstructs(rgd);
        if (error)
                goto fail;

        error = gfs2_glock_get(sdp, rgd->rd_addr,
                               &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
        if (error)
                goto fail;

        rgd->rd_gl->gl_object = rgd;
        rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
        rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
        if (rgd->rd_data > sdp->sd_max_rg_data)
                sdp->sd_max_rg_data = rgd->rd_data;
        spin_lock(&sdp->sd_rindex_spin);
        error = rgd_insert(rgd);
        spin_unlock(&sdp->sd_rindex_spin);
        if (!error)
                return 0;

        error = 0; /* someone else read in the rgrp; free it and ignore it */
        gfs2_glock_put(rgd->rd_gl);

fail:
        kfree(rgd->rd_bits);
        kmem_cache_free(gfs2_rgrpd_cachep, rgd);
        return error;
}

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        int error;

        do {
                error = read_rindex_entry(ip);
        } while (error == 0);

        if (error < 0)
                return error;

        sdp->sd_rindex_uptodate = 1;
        return 0;
}

/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
        struct gfs2_glock *gl = ip->i_gl;
        struct gfs2_holder ri_gh;
        int error = 0;
        int unlock_required = 0;

        /* Read new copy from disk if we don't have the latest */
        if (!sdp->sd_rindex_uptodate) {
                if (!gfs2_glock_is_locked_by_me(gl)) {
                        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
                        if (error)
                                return error;
                        unlock_required = 1;
                }
                if (!sdp->sd_rindex_uptodate)
                        error = gfs2_ri_update(ip);
                if (unlock_required)
                        gfs2_glock_dq_uninit(&ri_gh);
        }

        return error;
}

static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
        const struct gfs2_rgrp *str = buf;
        u32 rg_flags;

        rg_flags = be32_to_cpu(str->rg_flags);
        rg_flags &= ~GFS2_RDF_MASK;
        rgd->rd_flags &= GFS2_RDF_MASK;
        rgd->rd_flags |= rg_flags;
        rgd->rd_free = be32_to_cpu(str->rg_free);
        rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
        rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
}

static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
        struct gfs2_rgrp *str = buf;

        str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
        str->rg_free = cpu_to_be32(rgd->rd_free);
        str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
        str->__pad = cpu_to_be32(0);
        str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
        memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}

static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
        struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
        struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

        if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
            rgl->rl_dinodes != str->rg_dinodes ||
            rgl->rl_igeneration != str->rg_igeneration)
                return 0;
        return 1;
}

static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
        const struct gfs2_rgrp *str = buf;

        rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
        rgl->rl_flags = str->rg_flags;
        rgl->rl_free = str->rg_free;
        rgl->rl_dinodes = str->rg_dinodes;
        rgl->rl_igeneration = str->rg_igeneration;
        rgl->__pad = 0UL;
}

static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
{
        struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
        u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
        rgl->rl_unlinked = cpu_to_be32(unlinked);
}

static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
        struct gfs2_bitmap *bi;
        const u32 length = rgd->rd_length;
        const u8 *buffer = NULL;
        u32 i, goal, count = 0;

        for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
                goal = 0;
                buffer = bi->bi_bh->b_data + bi->bi_offset;
                WARN_ON(!buffer_uptodate(bi->bi_bh));
                while (goal < bi->bi_len * GFS2_NBBY) {
                        goal = gfs2_bitfit(buffer, bi->bi_len, goal,
                                           GFS2_BLKST_UNLINKED);
                        if (goal == BFITNOENT)
                                break;
                        count++;
                        goal++;
                }
        }

        return count;
}

/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_go_unlock() to release the bitmaps.
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
        struct gfs2_sbd *sdp = rgd->rd_sbd;
        struct gfs2_glock *gl = rgd->rd_gl;
        unsigned int length = rgd->rd_length;
        struct gfs2_bitmap *bi;
        unsigned int x, y;
        int error;

        if (rgd->rd_bits[0].bi_bh != NULL)
                return 0;

        for (x = 0; x < length; x++) {
                bi = rgd->rd_bits + x;
                error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
                if (error)
                        goto fail;
        }

        for (y = length; y--;) {
                bi = rgd->rd_bits + y;
                error = gfs2_meta_wait(sdp, bi->bi_bh);
                if (error)
                        goto fail;
                if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
                                        GFS2_METATYPE_RG)) {
                        error = -EIO;
                        goto fail;
                }
        }

        if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
                for (x = 0; x < length; x++)
                        clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
                gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
                rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
                rgd->rd_free_clone = rgd->rd_free;
        }
        if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
                rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
                gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
                                     rgd->rd_bits[0].bi_bh->b_data);
        } else if (sdp->sd_args.ar_rgrplvb) {
                if (!gfs2_rgrp_lvb_valid(rgd)) {
                        gfs2_consist_rgrpd(rgd);
                        error = -EIO;
                        goto fail;
                }
                if (rgd->rd_rgl->rl_unlinked == 0)
                        rgd->rd_flags &= ~GFS2_RDF_CHECK;
        }
        return 0;

fail:
        while (x--) {
                bi = rgd->rd_bits + x;
                brelse(bi->bi_bh);
                bi->bi_bh = NULL;
                gfs2_assert_warn(sdp, !bi->bi_clone);
        }

        return error;
}

int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
        u32 rl_flags;

        if (rgd->rd_flags & GFS2_RDF_UPTODATE)
                return 0;

        if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
                return gfs2_rgrp_bh_get(rgd);

        rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
        rl_flags &= ~GFS2_RDF_MASK;
        rgd->rd_flags &= GFS2_RDF_MASK;
        rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
        if (rgd->rd_rgl->rl_unlinked == 0)
                rgd->rd_flags &= ~GFS2_RDF_CHECK;
        rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
        rgd->rd_free_clone = rgd->rd_free;
        rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
        rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
        return 0;
}

int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
        struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
        struct gfs2_sbd *sdp = rgd->rd_sbd;

        if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
                return 0;
        return gfs2_rgrp_bh_get(rgd);
}

/**
 * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @gh: The glock holder for the resource group
 *
 */

void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
        struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
        int x, length = rgd->rd_length;

        for (x = 0; x < length; x++) {
                struct gfs2_bitmap *bi = rgd->rd_bits + x;
                if (bi->bi_bh) {
                        brelse(bi->bi_bh);
                        bi->bi_bh = NULL;
                }
        }
}

int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
                            struct buffer_head *bh,
                            const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
        struct super_block *sb = sdp->sd_vfs;
        struct block_device *bdev = sb->s_bdev;
        const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
                                           bdev_logical_block_size(sb->s_bdev);
        u64 blk;
        sector_t start = 0;
        sector_t nr_sects = 0;
        int rv;
        unsigned int x;
        u32 trimmed = 0;
        u8 diff;

        for (x = 0; x < bi->bi_len; x++) {
                const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
                clone += bi->bi_offset;
                clone += x;
                if (bh) {
                        const u8 *orig = bh->b_data + bi->bi_offset + x;
                        diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
                } else {
                        diff = ~(*clone | (*clone >> 1));
                }
                diff &= 0x55;
                if (diff == 0)
                        continue;
                blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
                blk *= sects_per_blk; /* convert to sectors */
                while (diff) {
                        if (diff & 1) {
                                if (nr_sects == 0)
                                        goto start_new_extent;
                                if ((start + nr_sects) != blk) {
                                        if (nr_sects >= minlen) {
                                                rv = blkdev_issue_discard(bdev,
                                                        start, nr_sects,
                                                        GFP_NOFS, 0);
                                                if (rv)
                                                        goto fail;
                                                trimmed += nr_sects;
                                        }
                                        nr_sects = 0;
start_new_extent:
                                        start = blk;
                                }
                                nr_sects += sects_per_blk;
                        }
                        diff >>= 2;
                        blk += sects_per_blk;
                }
        }
        if (nr_sects >= minlen) {
                rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
                if (rv)
                        goto fail;
                trimmed += nr_sects;
        }
        if (ptrimmed)
                *ptrimmed = trimmed;
        return 0;

fail:
        if (sdp->sd_args.ar_discard)
                fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
        sdp->sd_args.ar_discard = 0;
        return -EIO;
}

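/*
 * A note on the "diff" computation above: ~(x | (x >> 1)) & 0x55 puts
 * a 1 at the even bit of every 2-bit entry whose state is 00 (free).
 * So with an on-disk byte of 0x00 (all four blocks free) and a clone
 * byte of 0x05 (blocks 0 and 1 still held as used until the log is
 * flushed), the bh case gives diff = 0xff & 0x07 & 0x55 = 0x05:
 * blocks 0 and 1 are the newly freed ones and become discard
 * candidates.
 */
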
/**
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 *
 * Returns: 0 on success, otherwise error code
 */

int gfs2_fitrim(struct file *filp, void __user *argp)
{
        struct inode *inode = filp->f_dentry->d_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
        struct buffer_head *bh;
        struct gfs2_rgrpd *rgd;
        struct gfs2_rgrpd *rgd_end;
        struct gfs2_holder gh;
        struct fstrim_range r;
        int ret = 0;
        u64 amt;
        u64 trimmed = 0;
        unsigned int x;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;

        if (argp == NULL) {
                r.start = 0;
                r.len = ULLONG_MAX;
                r.minlen = 0;
        } else if (copy_from_user(&r, argp, sizeof(r)))
                return -EFAULT;

        ret = gfs2_rindex_update(sdp);
        if (ret)
                return ret;

        rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
        rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);

        while (1) {
                ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
                if (ret)
                        goto out;

                if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
                        /* Trim each bitmap in the rgrp */
                        for (x = 0; x < rgd->rd_length; x++) {
                                struct gfs2_bitmap *bi = rgd->rd_bits + x;
                                ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, r.minlen, &amt);
                                if (ret) {
                                        gfs2_glock_dq_uninit(&gh);
                                        goto out;
                                }
                                trimmed += amt;
                        }

                        /* Mark rgrp as having been trimmed */
                        ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
                        if (ret == 0) {
                                bh = rgd->rd_bits[0].bi_bh;
                                rgd->rd_flags |= GFS2_RGF_TRIMMED;
                                gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
                                gfs2_rgrp_out(rgd, bh->b_data);
                                gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
                                gfs2_trans_end(sdp);
                        }
                }
                gfs2_glock_dq_uninit(&gh);

                if (rgd == rgd_end)
                        break;

                rgd = gfs2_rgrpd_get_next(rgd);
        }

out:
        r.len = trimmed << 9;
        if (argp && copy_to_user(argp, &r, sizeof(r)))
                return -EFAULT;

        return ret;
}

/**
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @bi: the bitmap with the blocks
 * @ip: the inode structure
 * @biblk: the 32-bit block number relative to the start of the bitmap
 * @amount: the number of blocks to reserve
 *
 * Returns: NULL - reservation was already taken, so not inserted
 *          pointer to the inserted reservation
 */
static struct gfs2_blkreserv *rs_insert(struct gfs2_bitmap *bi,
                                        struct gfs2_inode *ip, u32 biblk,
                                        int amount)
{
        struct rb_node **newn, *parent = NULL;
        int rc;
        struct gfs2_blkreserv *rs = ip->i_res;
        struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
        u64 fsblock = gfs2_bi2rgd_blk(bi, biblk) + rgd->rd_data0;

        spin_lock(&rgd->rd_rsspin);
        newn = &rgd->rd_rstree.rb_node;
        BUG_ON(!ip->i_res);
        BUG_ON(gfs2_rs_active(rs));
        /* Figure out where to put new node */
        /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/
        while (*newn) {
                struct gfs2_blkreserv *cur =
                        rb_entry(*newn, struct gfs2_blkreserv, rs_node);

                parent = *newn;
                rc = rs_cmp(fsblock, amount, cur);
                if (rc > 0)
                        newn = &((*newn)->rb_right);
                else if (rc < 0)
                        newn = &((*newn)->rb_left);
                else {
                        spin_unlock(&rgd->rd_rsspin);
                        return NULL; /* reservation already in use */
                }
        }

        /* Do our reservation work */
        rs = ip->i_res;
        rs->rs_free = amount;
        rs->rs_rbm.offset = biblk;
        rs->rs_rbm.bi = bi;
        rb_link_node(&rs->rs_node, parent, newn);
        rb_insert_color(&rs->rs_node, &rgd->rd_rstree);

        /* Do our rgrp accounting for the reservation */
        rgd->rd_reserved += amount; /* blocks reserved */
        rgd->rd_rs_cnt++; /* number of in-tree reservations */
        spin_unlock(&rgd->rd_rsspin);
        trace_gfs2_rs(ip, rs, TRACE_RS_INSERT);
        return rs;
}

/**
 * unclaimed_blocks - return number of blocks that aren't spoken for
 * @rgd: the resource group descriptor
 */
static u32 unclaimed_blocks(struct gfs2_rgrpd *rgd)
{
        return rgd->rd_free_clone - rgd->rd_reserved;
}

/**
 * rg_mblk_search - find a group of multiple free blocks
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @requested: number of blocks required for this allocation
 *
 * This is very similar to rgblk_search, except we're looking for whole
 * 64-bit words that represent a chunk of 32 free blocks. I'm only focusing
 * on aligned dwords for speed's sake.
 *
 * Returns: 0 if successful or BFITNOENT if there isn't enough free space
 */

static int rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, unsigned requested)
{
        struct gfs2_bitmap *bi = rgd->rd_bits;
        const u32 length = rgd->rd_length;
        u32 blk;
        unsigned int buf, x, search_bytes;
        u8 *buffer = NULL;
        u8 *ptr, *end, *nonzero;
        u32 goal, rsv_bytes;
        struct gfs2_blkreserv *rs;
        u32 best_rs_bytes, unclaimed;
        int best_rs_blocks;

        /* Find bitmap block that contains bits for goal block */
        if (rgrp_contains_block(rgd, ip->i_goal))
                goal = ip->i_goal - rgd->rd_data0;
        else
                goal = rgd->rd_last_alloc;

        for (buf = 0; buf < length; buf++) {
                bi = rgd->rd_bits + buf;
                /* Convert scope of "goal" from rgrp-wide to within
                   found bit block */
                if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
                        goal -= bi->bi_start * GFS2_NBBY;
                        goto do_search;
                }
        }
        buf = 0;
        goal = 0;

do_search:
        best_rs_blocks = max_t(int, atomic_read(&ip->i_res->rs_sizehint),
                               (RGRP_RSRV_MINBLKS * rgd->rd_length));
        best_rs_bytes = (best_rs_blocks *
                         (1 + (RSRV_CONTENTION_FACTOR * rgd->rd_rs_cnt))) /
                GFS2_NBBY; /* 1 + is for our not-yet-created reservation */
        best_rs_bytes = ALIGN(best_rs_bytes, sizeof(u64));
        unclaimed = unclaimed_blocks(rgd);
        if (best_rs_bytes * GFS2_NBBY > unclaimed)
                best_rs_bytes = unclaimed >> GFS2_BIT_SIZE;

        for (x = 0; x <= length; x++) {
                bi = rgd->rd_bits + buf;

                if (test_bit(GBF_FULL, &bi->bi_flags))
                        goto skip;

                WARN_ON(!buffer_uptodate(bi->bi_bh));
                if (bi->bi_clone)
                        buffer = bi->bi_clone + bi->bi_offset;
                else
                        buffer = bi->bi_bh->b_data + bi->bi_offset;

                /* We have to keep the reservations aligned on u64 boundaries
                   otherwise we could get situations where a byte can't be
                   used because it's after a reservation, but a free bit still
                   is within the reservation's area. */
                ptr = buffer + ALIGN(goal >> GFS2_BIT_SIZE, sizeof(u64));
                end = (buffer + bi->bi_len);
                while (ptr < end) {
                        rsv_bytes = 0;
                        if ((ptr + best_rs_bytes) <= end)
                                search_bytes = best_rs_bytes;
                        else
                                search_bytes = end - ptr;
                        BUG_ON(!search_bytes);
                        nonzero = memchr_inv(ptr, 0, search_bytes);
                        /* If the lot is all zeroes, reserve the whole size. If
                           there's enough zeroes to satisfy the request, use
                           what we can. If there's not enough, keep looking. */
                        if (nonzero == NULL)
                                rsv_bytes = search_bytes;
                        else if ((nonzero - ptr) * GFS2_NBBY >= requested)
                                rsv_bytes = (nonzero - ptr);

                        if (rsv_bytes) {
                                blk = ((ptr - buffer) * GFS2_NBBY);
                                BUG_ON(blk >= bi->bi_len * GFS2_NBBY);
                                rs = rs_insert(bi, ip, blk,
                                               rsv_bytes * GFS2_NBBY);
                                if (IS_ERR(rs))
                                        return PTR_ERR(rs);
                                if (rs)
                                        return 0;
                        }
                        ptr += ALIGN(search_bytes, sizeof(u64));
                }
skip:
                /* Try next bitmap block (wrap back to rgrp header
                   if at end) */
                buf++;
                buf %= length;
                goal = 0;
        }

        return BFITNOENT;
}

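/*
 * Sizing example for the search above: with rs_sizehint = 1024 and two
 * reservations already in the tree (rd_rs_cnt = 2), assuming the hint
 * exceeds RGRP_RSRV_MINBLKS * rd_length,
 *   best_rs_bytes = (1024 * (1 + 4 * 2)) / GFS2_NBBY = 2304 bytes,
 * which is already u64-aligned, and is then clamped so that it never
 * exceeds the rgrp's unclaimed blocks before the memchr_inv() scan.
 */
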
/**
 * try_rgrp_fit - See if a given reservation will fit in a given RG
 * @rgd: the RG data
 * @ip: the inode
 * @requested: the number of blocks to be reserved
 *
 * If there's room for the requested blocks to be allocated from the RG:
 * This will try to get a multi-block reservation first, and if that doesn't
 * fit, it will take what it can.
 *
 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
 */

static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
                        unsigned requested)
{
        if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
                return 0;
        /* Look for a multi-block reservation. */
        if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS &&
            rg_mblk_search(rgd, ip, requested) != BFITNOENT)
                return 1;
        if (unclaimed_blocks(rgd) >= requested)
                return 1;

        return 0;
}

/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 */

static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
                                      const struct gfs2_inode *ip)
{
        struct gfs2_blkreserv *rs;
        struct rb_node *n;
        int rc;

        spin_lock(&rgd->rd_rsspin);
        n = rgd->rd_rstree.rb_node;
        while (n) {
                rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
                rc = rs_cmp(block, 1, rs);
                if (rc < 0)
                        n = n->rb_left;
                else if (rc > 0)
                        n = n->rb_right;
                else
                        break;
        }

        if (n) {
                while ((rs_cmp(block, 1, rs) == 0) && (ip->i_res != rs)) {
                        block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
                        n = rb_next(&rs->rs_node);
                        if (n == NULL)
                                break;
                        rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
                }
        }

        spin_unlock(&rgd->rd_rsspin);
        return block;
}

/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code
 */

static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
        u64 rblock = block - rbm->rgd->rd_data0;
        u32 goal = (u32)rblock;
        int x;

        if (WARN_ON_ONCE(rblock > UINT_MAX))
                return -EINVAL;
        if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
                return -E2BIG;

        for (x = 0; x < rbm->rgd->rd_length; x++) {
                rbm->bi = rbm->rgd->rd_bits + x;
                if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) {
                        rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY);
                        break;
                }
        }

        return 0;
}

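/*
 * For example, if rbm->rgd starts at rd_data0 = 1000 and has two
 * bitmaps of 8000 blocks each, then gfs2_rbm_from_block(rbm, 10000)
 * computes rblock = 9000; the first bitmap only covers blocks 0..7999
 * ((bi_start + bi_len) * GFS2_NBBY = 8000), so rbm->bi is set to the
 * second bitmap and rbm->offset = 9000 - 8000 = 1000.
 */
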
/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */

static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
                                             const struct gfs2_inode *ip)
{
        u64 block = gfs2_rbm_to_block(rbm);
        u64 nblock;
        int ret;

        nblock = gfs2_next_unreserved_block(rbm->rgd, block, ip);
        if (nblock == block)
                return 0;
        ret = gfs2_rbm_from_block(rbm, nblock);
        if (ret < 0)
                return ret;
        return 1;
}


/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state,
			 const struct gfs2_inode *ip, bool nowrap)
{
	struct buffer_head *bh;
	struct gfs2_bitmap *initial_bi;
	u32 initial_offset;
	u32 offset;
	u8 *buffer;
	int index;
	int n = 0;
	int iters = rbm->rgd->rd_length;
	int ret;

	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	 */
	if (rbm->offset != 0)
		iters++;

	while(1) {
		if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = rbm->bi->bi_bh;
		buffer = bh->b_data + rbm->bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
			buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
			goto bitmap_full;
		rbm->offset = offset;
		if (ip == NULL)
			return 0;

		initial_bi = rbm->bi;
		ret = gfs2_reservation_check_and_update(rbm, ip);
		if (ret == 0)
			return 0;
		if (ret > 0) {
			n += (rbm->bi - initial_bi);
			goto next_iter;
		}
		if (ret == -E2BIG) {
			index = 0;
			rbm->offset = 0;
			n += (rbm->bi - initial_bi);
			goto res_covered_end_of_rgrp;
		}
		return ret;

bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &rbm->bi->bi_flags);

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		index = rbm->bi - rbm->rgd->rd_bits;
		index++;
		if (index == rbm->rgd->rd_length)
			index = 0;
res_covered_end_of_rgrp:
		rbm->bi = &rbm->rgd->rd_bits[index];
		if ((index == 0) && nowrap)
			break;
		n++;
next_iter:
		if (n >= iters)
			break;
	}

	return -ENOSPC;
}
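
/*
 * Editor's illustrative sketch (not part of the original file): scanning a
 * whole rgrp for unlinked dinodes, starting from the first bitmap and
 * refusing to wrap, much as try_rgrp_unlink() below does. "rgd" is an
 * assumed, already-locked resource group:
 *
 *	struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
 *	u64 block;
 *
 *	while (gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, true) == 0) {
 *		block = gfs2_rbm_to_block(&rbm);
 *		if (gfs2_rbm_from_block(&rbm, block + 1))
 *			break;
 *	}
 *
 * Passing a NULL @ip skips the reservation check, which only matters when
 * searching for free blocks to allocate.
 */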

/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Any unlinked dinodes found are handed to the delete workqueue for
 * disposal; nothing is returned to the caller.
 */

static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };

	while (1) {
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}

/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @requested: the number of blocks to be reserved
 *
 * Returns: errno
 */

int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = ip->i_res;
	int error = 0, rg_locked, flags = LM_FLAG_TRY;
	u64 last_unlinked = NO_BLOCK;
	int loops = 0;

	if (sdp->sd_args.ar_rgrplvb)
		flags |= GL_SKIP;
	if (gfs2_assert_warn(sdp, requested)) {
		error = -EINVAL;
		goto out;
	}
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
		flags = 0; /* Yoda: Do or do not. There is no try */
	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
		rs->rs_rbm.rgd = begin = ip->i_rgd;
	} else {
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	}
	if (rs->rs_rbm.rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 0;

		if (gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 1;
			error = 0;
		} else if (!loops && !gfs2_rs_active(rs) &&
			   rs->rs_rbm.rgd->rd_rs_cnt > RGRP_RSRV_MAX_CONTENDERS) {
			/* If the rgrp already is maxed out for contenders,
			   we can eliminate it as a "first pass" without even
			   requesting the rgrp glock. */
			error = GLR_TRYFAILED;
		} else {
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &rs->rs_rgd_gh);
			if (!error && sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (error) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
					return error;
				}
			}
		}
		switch (error) {
		case 0:
			if (gfs2_rs_active(rs)) {
				if (unclaimed_blocks(rs->rs_rbm.rgd) +
				    rs->rs_free >= requested) {
					ip->i_rgd = rs->rs_rbm.rgd;
					return 0;
				}
				/* We have a multi-block reservation, but the
				   rgrp doesn't have enough free blocks to
				   satisfy the request. Free the reservation
				   and look for a suitable rgrp. */
				gfs2_rs_deltree(ip, rs);
			}
			if (try_rgrp_fit(rs->rs_rbm.rgd, ip, requested)) {
				if (sdp->sd_args.ar_rgrplvb)
					gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
				ip->i_rgd = rs->rs_rbm.rgd;
				return 0;
			}
			if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK) {
				if (sdp->sd_args.ar_rgrplvb)
					gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
				try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
						ip->i_no_addr);
			}
			if (!rg_locked)
				gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
			/* fall through */
		case GLR_TRYFAILED:
			rs->rs_rbm.rgd = gfs2_rgrpd_get_next(rs->rs_rbm.rgd);
			rs->rs_rbm.rgd = rs->rs_rbm.rgd ? : begin; /* if NULL, wrap */
			if (rs->rs_rbm.rgd != begin) /* If we didn't wrap */
				break;

			flags &= ~LM_FLAG_TRY;
			loops++;
			/* Check that fs hasn't grown if writing to rindex */
			if (ip == GFS2_I(sdp->sd_rindex) &&
			    !sdp->sd_rindex_uptodate) {
				error = gfs2_ri_update(ip);
				if (error)
					goto out;
			} else if (loops == 2)
				/* Flushing the log may release space */
				gfs2_log_flush(sdp, NULL);
			break;
		default:
			goto out;
		}
	}
	error = -ENOSPC;

out:
	return error;
}

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs = ip->i_res;

	if (rs->rs_rgd_gh.gh_gl)
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}
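
/*
 * Editor's illustrative sketch (not from this file): the usual pairing in
 * a write path is reserve -> allocate -> release. Error handling is
 * elided, and "ip" and "blks" are assumed caller state:
 *
 *	error = gfs2_inplace_reserve(ip, blks);
 *	if (error)
 *		return error;
 *	...			(start a transaction, call
 *				 gfs2_alloc_blocks(), end the transaction)
 *	gfs2_inplace_release(ip);
 */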

/**
 * gfs2_get_block_type - Check that a block in an RG is of a given type
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */

static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	int ret;

	ret = gfs2_rbm_from_block(&rbm, block);
	WARN_ON_ONCE(ret != 0);

	return gfs2_testbit(rgd, rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
			    rbm.bi->bi_len, rbm.offset);
}

/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the resource group information
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length (value/result)
 *
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to the new allocation state: GFS2_BLKST_DINODE for
 * the first block when @dinode is true, GFS2_BLKST_USED otherwise.
 *
 * Returns: starting block number of the extent (fs scope)
 */

static u64 gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
			     unsigned int *n)
{
	struct gfs2_rgrpd *rgd = rbm->rgd;
	struct gfs2_bitmap *bi = rbm->bi;
	u32 blk = rbm->offset;
	const unsigned int elen = *n;
	u32 goal;
	const u8 *buffer = NULL;

	*n = 0;
	buffer = bi->bi_bh->b_data + bi->bi_offset;
	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
	gfs2_setbit(rgd, bi->bi_clone, bi, blk,
		    dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	(*n)++;
	goal = blk;
	while (*n < elen) {
		goal++;
		if (goal >= (bi->bi_len * GFS2_NBBY))
			break;
		if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
		    GFS2_BLKST_FREE)
			break;
		gfs2_setbit(rgd, bi->bi_clone, bi, goal, GFS2_BLKST_USED);
		(*n)++;
	}
	blk = gfs2_bi2rgd_blk(bi, blk);
	rgd->rd_last_alloc = blk + *n - 1;
	return rgd->rd_data0 + blk;
}

/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rbm rbm;

	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
	if (!rbm.rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	while (blen--) {
		gfs2_rbm_from_block(&rbm, bstart);
		bstart++;
		if (!rbm.bi->bi_clone) {
			rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
			       rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
			       rbm.bi->bi_len);
		}
		gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.bi->bi_bh, 1);
		gfs2_setbit(rbm.rgd, NULL, rbm.bi, rbm.offset, new_state);
	}

	return rbm.rgd;
}
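
/*
 * Editor's note on the bi_clone copy above (an interpretation, not text
 * from the original authors): the clone preserves the bitmap as it stood
 * before the blocks were freed. Allocation searches read the clone when
 * one is present (see gfs2_rbm_find()), so just-freed blocks are not
 * handed out again before the transaction that freed them is safely in
 * the log.
 */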

/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 *
 */

int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;

	if (rgd == NULL)
		return 0;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
		       rgd->rd_reserved);
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		dump_rs(seq, trs);
	}
	spin_unlock(&rgd->rd_rsspin);
	return 0;
}

static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}

/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
 */

static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
			if (rs->rs_free && !ret)
				goto out;
		}
		__rs_deltree(ip, rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}

/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */

int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	unsigned int ndata;
	u64 goal;
	u64 block; /* block, within the file system scope */
	int error;

	if (gfs2_rs_active(ip->i_res))
		goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
	else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;

	if ((goal < rbm.rgd->rd_data0) ||
	    (goal >= rbm.rgd->rd_data0 + rbm.rgd->rd_data))
		rbm.rgd = gfs2_blk2rgrpd(sdp, goal, 1);

	gfs2_rbm_from_block(&rbm, goal);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, ip, false);

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "error=%d, nblocks=%u, full=%d\n", error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
		goto rgrp_error;
	}

	block = gfs2_alloc_extent(&rbm, dinode, nblocks);
	if (gfs2_rs_active(ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		printk(KERN_WARNING "nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, 1);

	/*
	 * This needs reviewing to see why we cannot do the quota change
	 * at this point in the dinode case.
	 */
	if (ndata)
		gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
				  ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
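
/*
 * Editor's illustrative sketch (not from this file): allocating a single
 * data block. @nblocks is value/result, so on return "n" holds the extent
 * length actually allocated, and @generation may be NULL when no dinode
 * is being created. "ip" is an assumed inode that already holds an
 * inplace reservation:
 *
 *	u64 bn;
 *	unsigned int n = 1;
 *	int error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 *
 *	if (error == 0)
 *		...		(bn is the first block of the new extent)
 */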

/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 */

void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}

void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, 1);
}

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
}

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}

/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */

int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	if (gfs2_get_block_type(rgd, no_addr) != type)
		error = -ESTALE;

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}
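
/*
 * Editor's illustrative sketch (not from this file): verifying that a
 * block still holds a dinode before trusting a cached address. "sdp" and
 * "no_addr" are assumed caller state:
 *
 *	error = gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_DINODE);
 *	if (error == -ESTALE)
 *		...		(the block was deallocated or reused)
 *	else if (error)
 *		...		(rgrp lookup or locking failed)
 */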

/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
		rgd = ip->i_rgd;
	else
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
		return;
	}
	ip->i_rgd = rgd;

	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *      and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, 0,
				 &rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */

void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;
	}
}
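
/*
 * Editor's illustrative sketch (not from this file): the rlist life cycle
 * as used by GFS2 deallocation paths -- collect rgrps, set up holders,
 * lock them all, do the work, then unlock and free. gfs2_glock_nq_m() and
 * gfs2_glock_dq_m() are the multi-holder locking helpers used elsewhere
 * in GFS2; error handling is elided:
 *
 *	struct gfs2_rgrp_list rlist;
 *	int error;
 *
 *	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 *	gfs2_rlist_add(ip, &rlist, block);
 *	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 *	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	if (error == 0) {
 *		...		(free blocks within the locked rgrps)
 *		gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	}
 *	gfs2_rlist_free(&rlist);
 */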