rgrp.c

  1. /*
  2. * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  4. *
  5. * This copyrighted material is made available to anyone wishing to use,
  6. * modify, copy, or redistribute it subject to the terms and conditions
  7. * of the GNU General Public License version 2.
  8. */
  9. #include <linux/slab.h>
  10. #include <linux/spinlock.h>
  11. #include <linux/completion.h>
  12. #include <linux/buffer_head.h>
  13. #include <linux/fs.h>
  14. #include <linux/gfs2_ondisk.h>
  15. #include <linux/prefetch.h>
  16. #include <linux/blkdev.h>
  17. #include <linux/rbtree.h>
  18. #include "gfs2.h"
  19. #include "incore.h"
  20. #include "glock.h"
  21. #include "glops.h"
  22. #include "lops.h"
  23. #include "meta_io.h"
  24. #include "quota.h"
  25. #include "rgrp.h"
  26. #include "super.h"
  27. #include "trans.h"
  28. #include "util.h"
  29. #include "log.h"
  30. #include "inode.h"
  31. #include "trace_gfs2.h"
  32. #define BFITNOENT ((u32)~0)
  33. #define NO_BLOCK ((u64)~0)
  34. #define RSRV_CONTENTION_FACTOR 4
  35. #define RGRP_RSRV_MAX_CONTENDERS 2
  36. #if BITS_PER_LONG == 32
  37. #define LBITMASK (0x55555555UL)
  38. #define LBITSKIP55 (0x55555555UL)
  39. #define LBITSKIP00 (0x00000000UL)
  40. #else
  41. #define LBITMASK (0x5555555555555555UL)
  42. #define LBITSKIP55 (0x5555555555555555UL)
  43. #define LBITSKIP00 (0x0000000000000000UL)
  44. #endif
  45. /*
  46. * These routines are used by the resource group routines (rgrp.c)
  47. * to keep track of block allocation. Each block is represented by two
  48. * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
  49. *
  50. * 0 = Free
  51. * 1 = Used (not metadata)
  52. * 2 = Unlinked (still in use) inode
  53. * 3 = Used (metadata)
  54. */
  55. static const char valid_change[16] = {
  56. /* current */
  57. /* n */ 0, 1, 1, 1,
  58. /* e */ 1, 0, 0, 0,
  59. /* w */ 0, 0, 0, 1,
  60. 1, 0, 0, 0
  61. };
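/*
 * Editor's note (not in the original source): this table is indexed as
 * valid_change[new_state * 4 + cur_state] in gfs2_setbit() below. A zero
 * entry marks a forbidden transition - for example freeing a block that is
 * already free, or turning a data block directly into metadata - and such a
 * transition triggers gfs2_consist_rgrpd().
 */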
  62. static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
  63. unsigned char old_state,
  64. struct gfs2_bitmap **rbi);
  65. /**
  66. * gfs2_setbit - Set a bit in the bitmaps
  67. * @rgd: the resource group descriptor
  68. * @buf2: the clone buffer that holds the bitmaps
  69. * @bi: the bitmap structure
  70. * @block: the block to set
  71. * @new_state: the new state of the block
  72. *
  73. */
  74. static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2,
  75. struct gfs2_bitmap *bi, u32 block,
  76. unsigned char new_state)
  77. {
  78. unsigned char *byte1, *byte2, *end, cur_state;
  79. unsigned int buflen = bi->bi_len;
  80. const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
  81. byte1 = bi->bi_bh->b_data + bi->bi_offset + (block / GFS2_NBBY);
  82. end = bi->bi_bh->b_data + bi->bi_offset + buflen;
  83. BUG_ON(byte1 >= end);
  84. cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
  85. if (unlikely(!valid_change[new_state * 4 + cur_state])) {
  86. printk(KERN_WARNING "GFS2: buf_blk = 0x%llx old_state=%d, "
  87. "new_state=%d\n",
  88. (unsigned long long)block, cur_state, new_state);
  89. printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%lx\n",
  90. (unsigned long long)rgd->rd_addr,
  91. (unsigned long)bi->bi_start);
  92. printk(KERN_WARNING "GFS2: bi_offset=0x%lx bi_len=0x%lx\n",
  93. (unsigned long)bi->bi_offset,
  94. (unsigned long)bi->bi_len);
  95. dump_stack();
  96. gfs2_consist_rgrpd(rgd);
  97. return;
  98. }
  99. *byte1 ^= (cur_state ^ new_state) << bit;
  100. if (buf2) {
  101. byte2 = buf2 + bi->bi_offset + (block / GFS2_NBBY);
  102. cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
  103. *byte2 ^= (cur_state ^ new_state) << bit;
  104. }
  105. }
  106. /**
  107. * gfs2_testbit - test a bit in the bitmaps
  108. * @rgd: the resource group descriptor
  109. * @buffer: the buffer that holds the bitmaps
  110. * @buflen: the length (in bytes) of the buffer
  111. * @block: the block to read
  112. *
  113. */
  114. static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
  115. const unsigned char *buffer,
  116. unsigned int buflen, u32 block)
  117. {
  118. const unsigned char *byte, *end;
  119. unsigned char cur_state;
  120. unsigned int bit;
  121. byte = buffer + (block / GFS2_NBBY);
  122. bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
  123. end = buffer + buflen;
  124. gfs2_assert(rgd->rd_sbd, byte < end);
  125. cur_state = (*byte >> bit) & GFS2_BIT_MASK;
  126. return cur_state;
  127. }
  128. /**
  129. * gfs2_bit_search
  130. * @ptr: Pointer to bitmap data
  131. * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
  132. * @state: The state we are searching for
  133. *
  134. * We xor the bitmap data with a pattern which is the bitwise opposite
  135. * of what we are looking for. This gives rise to a pattern of ones
  136. * wherever there is a match. Since we have two bits per entry, we
  137. * take this pattern, shift it down by one place and then AND it with
  138. * the original. All the even bit positions (0, 2, 4, etc.) then represent
  139. * successful matches, so we mask with 0x55555..... to remove the unwanted
  140. * odd bit positions.
  141. *
  142. * This allows searching of a whole u64 at once (32 blocks) with a
  143. * single test (on 64 bit arches).
  144. */
  145. static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
  146. {
  147. u64 tmp;
  148. static const u64 search[] = {
  149. [0] = 0xffffffffffffffffULL,
  150. [1] = 0xaaaaaaaaaaaaaaaaULL,
  151. [2] = 0x5555555555555555ULL,
  152. [3] = 0x0000000000000000ULL,
  153. };
  154. tmp = le64_to_cpu(*ptr) ^ search[state];
  155. tmp &= (tmp >> 1);
  156. tmp &= mask;
  157. return tmp;
  158. }
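/*
 * Worked example (editorial illustration, not part of the original code):
 * take one byte 0b11100100, whose two-bit entries from the LSB up are
 * 00, 01, 10, 11. Searching for state 3: tmp = data ^ search[3] =
 * 0b11100100; tmp & (tmp >> 1) = 0b01100000; masking with 0x55 leaves
 * 0b01000000, i.e. only bit 6 set. gfs2_bitfit() below then computes
 * __ffs64() / 2 = 3 to recover the matching entry. The same logic covers
 * all 32 two-bit entries of a u64 in one pass.
 */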
  159. /**
  160. * rs_cmp - multi-block reservation range compare
  161. * @blk: absolute file system block number of the new reservation
  162. * @len: number of blocks in the new reservation
  163. * @rs: existing reservation to compare against
  164. *
  165. * returns: 1 if the block range is beyond the reach of the reservation
  166. * -1 if the block range is before the start of the reservation
  167. * 0 if the block range overlaps with the reservation
  168. */
  169. static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
  170. {
  171. u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
  172. if (blk >= startblk + rs->rs_free)
  173. return 1;
  174. if (blk + len - 1 < startblk)
  175. return -1;
  176. return 0;
  177. }
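/*
 * Editor's note: rs_cmp() serves two callers - rs_find() looks up a single
 * block (len == 1), while rs_insert() passes the full length of a proposed
 * reservation, so a return of 0 (overlap) makes the insert fail.
 */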
  178. /**
  179. * rs_find - Find a rgrp multi-block reservation that contains a given block
  180. * @rgd: The rgrp
  181. * @rgblk: The block we're looking for, relative to the rgrp
  182. */
  183. static struct gfs2_blkreserv *rs_find(struct gfs2_rgrpd *rgd, u32 rgblk)
  184. {
  185. struct rb_node **newn;
  186. int rc;
  187. u64 fsblk = rgblk + rgd->rd_data0;
  188. spin_lock(&rgd->rd_rsspin);
  189. newn = &rgd->rd_rstree.rb_node;
  190. while (*newn) {
  191. struct gfs2_blkreserv *cur =
  192. rb_entry(*newn, struct gfs2_blkreserv, rs_node);
  193. rc = rs_cmp(fsblk, 1, cur);
  194. if (rc < 0)
  195. newn = &((*newn)->rb_left);
  196. else if (rc > 0)
  197. newn = &((*newn)->rb_right);
  198. else {
  199. spin_unlock(&rgd->rd_rsspin);
  200. return cur;
  201. }
  202. }
  203. spin_unlock(&rgd->rd_rsspin);
  204. return NULL;
  205. }
  206. /**
  207. * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
  208. * a block in a given allocation state.
  209. * @buf: the buffer that holds the bitmaps
  210. * @len: the length (in bytes) of the buffer
  211. * @goal: start search at this block's bit-pair (within @buffer)
  212. * @state: GFS2_BLKST_XXX the state of the block we're looking for.
  213. *
  214. * Scope of @goal and returned block number is only within this bitmap buffer,
  215. * not entire rgrp or filesystem. @buffer will be offset from the actual
  216. * beginning of a bitmap block buffer, skipping any header structures, but
  217. * headers are always a multiple of 64 bits long so that the buffer is
  218. * always aligned to a 64 bit boundary.
  219. *
  220. * The size of the buffer is in bytes, but it is assumed that it is
  221. * always ok to read a complete multiple of 64 bits at the end
  222. * of the block in case the end is not aligned to a natural boundary.
  223. *
  224. * Return: the block number (bitmap buffer scope) that was found
  225. */
  226. static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
  227. u32 goal, u8 state)
  228. {
  229. u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
  230. const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
  231. const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
  232. u64 tmp;
  233. u64 mask = 0x5555555555555555ULL;
  234. u32 bit;
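/*
 * Editor's note: each __le64 word covers 32 two-bit entries, so the word
 * holding @goal is the (goal >> 5)-th word of the buffer and spoint =
 * (goal * 2) % 64 is goal's bit offset within it; shifting the mask below
 * discards the entries that precede @goal in that first word.
 */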
  235. BUG_ON(state > 3);
  236. /* Mask off bits we don't care about at the start of the search */
  237. mask <<= spoint;
  238. tmp = gfs2_bit_search(ptr, mask, state);
  239. ptr++;
  240. while(tmp == 0 && ptr < end) {
  241. tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
  242. ptr++;
  243. }
  244. /* Mask off any bits which are more than len bytes from the start */
  245. if (ptr == end && (len & (sizeof(u64) - 1)))
  246. tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
  247. /* Didn't find anything, so return */
  248. if (tmp == 0)
  249. return BFITNOENT;
  250. ptr--;
  251. bit = __ffs64(tmp);
  252. bit /= 2; /* two bits per entry in the bitmap */
  253. return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
  254. }
  255. /**
  256. * gfs2_bitcount - count the number of bits in a certain state
  257. * @rgd: the resource group descriptor
  258. * @buffer: the buffer that holds the bitmaps
  259. * @buflen: the length (in bytes) of the buffer
  260. * @state: the state of the block we're looking for
  261. *
  262. * Returns: The number of bits
  263. */
  264. static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
  265. unsigned int buflen, u8 state)
  266. {
  267. const u8 *byte = buffer;
  268. const u8 *end = buffer + buflen;
  269. const u8 state1 = state << 2;
  270. const u8 state2 = state << 4;
  271. const u8 state3 = state << 6;
  272. u32 count = 0;
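/*
 * Editor's note: state1, state2 and state3 are @state shifted into the
 * second, third and fourth two-bit positions of a byte, so the loop below
 * can test all four block entries of each byte with simple mask compares.
 */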
  273. for (; byte < end; byte++) {
  274. if (((*byte) & 0x03) == state)
  275. count++;
  276. if (((*byte) & 0x0C) == state1)
  277. count++;
  278. if (((*byte) & 0x30) == state2)
  279. count++;
  280. if (((*byte) & 0xC0) == state3)
  281. count++;
  282. }
  283. return count;
  284. }
  285. /**
  286. * gfs2_rgrp_verify - Verify that a resource group is consistent
  287. * @rgd: the rgrp
  288. *
  289. */
  290. void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
  291. {
  292. struct gfs2_sbd *sdp = rgd->rd_sbd;
  293. struct gfs2_bitmap *bi = NULL;
  294. u32 length = rgd->rd_length;
  295. u32 count[4], tmp;
  296. int buf, x;
  297. memset(count, 0, 4 * sizeof(u32));
  298. /* Count # blocks in each of 4 possible allocation states */
  299. for (buf = 0; buf < length; buf++) {
  300. bi = rgd->rd_bits + buf;
  301. for (x = 0; x < 4; x++)
  302. count[x] += gfs2_bitcount(rgd,
  303. bi->bi_bh->b_data +
  304. bi->bi_offset,
  305. bi->bi_len, x);
  306. }
  307. if (count[0] != rgd->rd_free) {
  308. if (gfs2_consist_rgrpd(rgd))
  309. fs_err(sdp, "free data mismatch: %u != %u\n",
  310. count[0], rgd->rd_free);
  311. return;
  312. }
  313. tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
  314. if (count[1] != tmp) {
  315. if (gfs2_consist_rgrpd(rgd))
  316. fs_err(sdp, "used data mismatch: %u != %u\n",
  317. count[1], tmp);
  318. return;
  319. }
  320. if (count[2] + count[3] != rgd->rd_dinodes) {
  321. if (gfs2_consist_rgrpd(rgd))
  322. fs_err(sdp, "used metadata mismatch: %u != %u\n",
  323. count[2] + count[3], rgd->rd_dinodes);
  324. return;
  325. }
  326. }
  327. static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
  328. {
  329. u64 first = rgd->rd_data0;
  330. u64 last = first + rgd->rd_data;
  331. return first <= block && block < last;
  332. }
  333. /**
  334. * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
  335. * @sdp: The GFS2 superblock
  336. * @blk: The data block number
  337. * @exact: True if this needs to be an exact match
  338. *
  339. * Returns: The resource group, or NULL if not found
  340. */
  341. struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
  342. {
  343. struct rb_node *n, *next;
  344. struct gfs2_rgrpd *cur;
  345. spin_lock(&sdp->sd_rindex_spin);
  346. n = sdp->sd_rindex_tree.rb_node;
  347. while (n) {
  348. cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
  349. next = NULL;
  350. if (blk < cur->rd_addr)
  351. next = n->rb_left;
  352. else if (blk >= cur->rd_data0 + cur->rd_data)
  353. next = n->rb_right;
  354. if (next == NULL) {
  355. spin_unlock(&sdp->sd_rindex_spin);
  356. if (exact) {
  357. if (blk < cur->rd_addr)
  358. return NULL;
  359. if (blk >= cur->rd_data0 + cur->rd_data)
  360. return NULL;
  361. }
  362. return cur;
  363. }
  364. n = next;
  365. }
  366. spin_unlock(&sdp->sd_rindex_spin);
  367. return NULL;
  368. }
  369. /**
  370. * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
  371. * @sdp: The GFS2 superblock
  372. *
  373. * Returns: The first rgrp in the filesystem
  374. */
  375. struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
  376. {
  377. const struct rb_node *n;
  378. struct gfs2_rgrpd *rgd;
  379. spin_lock(&sdp->sd_rindex_spin);
  380. n = rb_first(&sdp->sd_rindex_tree);
  381. rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
  382. spin_unlock(&sdp->sd_rindex_spin);
  383. return rgd;
  384. }
  385. /**
  386. * gfs2_rgrpd_get_next - get the next RG
  387. * @rgd: the resource group descriptor
  388. *
  389. * Returns: The next rgrp
  390. */
  391. struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
  392. {
  393. struct gfs2_sbd *sdp = rgd->rd_sbd;
  394. const struct rb_node *n;
  395. spin_lock(&sdp->sd_rindex_spin);
  396. n = rb_next(&rgd->rd_node);
  397. if (n == NULL)
  398. n = rb_first(&sdp->sd_rindex_tree);
  399. if (unlikely(&rgd->rd_node == n)) {
  400. spin_unlock(&sdp->sd_rindex_spin);
  401. return NULL;
  402. }
  403. rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
  404. spin_unlock(&sdp->sd_rindex_spin);
  405. return rgd;
  406. }
  407. void gfs2_free_clones(struct gfs2_rgrpd *rgd)
  408. {
  409. int x;
  410. for (x = 0; x < rgd->rd_length; x++) {
  411. struct gfs2_bitmap *bi = rgd->rd_bits + x;
  412. kfree(bi->bi_clone);
  413. bi->bi_clone = NULL;
  414. }
  415. }
  416. /**
  417. * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
  418. * @ip: the inode for this reservation
  419. */
  420. int gfs2_rs_alloc(struct gfs2_inode *ip)
  421. {
  422. int error = 0;
  423. struct gfs2_blkreserv *res;
  424. if (ip->i_res)
  425. return 0;
  426. res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
  427. if (!res)
  428. return -ENOMEM;
  429. rb_init_node(&res->rs_node);
  430. down_write(&ip->i_rw_mutex);
  431. if (ip->i_res)
  432. kmem_cache_free(gfs2_rsrv_cachep, res);
  433. else
  434. ip->i_res = res;
  435. up_write(&ip->i_rw_mutex);
  436. return error;
  437. }
  438. static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs)
  439. {
  440. gfs2_print_dbg(seq, " r: %llu s:%llu b:%u f:%u\n",
  441. rs->rs_rbm.rgd->rd_addr, gfs2_rbm_to_block(&rs->rs_rbm),
  442. rs->rs_rbm.offset, rs->rs_free);
  443. }
  444. /**
  445. * __rs_deltree - remove a multi-block reservation from the rgd tree
  446. * @rs: The reservation to remove
  447. *
  448. */
  449. static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
  450. {
  451. struct gfs2_rgrpd *rgd;
  452. if (!gfs2_rs_active(rs))
  453. return;
  454. rgd = rs->rs_rbm.rgd;
  455. trace_gfs2_rs(ip, rs, TRACE_RS_TREEDEL);
  456. rb_erase(&rs->rs_node, &rgd->rd_rstree);
  457. rb_init_node(&rs->rs_node);
  458. BUG_ON(!rgd->rd_rs_cnt);
  459. rgd->rd_rs_cnt--;
  460. if (rs->rs_free) {
  461. /* return reserved blocks to the rgrp and the ip */
  462. BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
  463. rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
  464. rs->rs_free = 0;
  465. clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
  466. smp_mb__after_clear_bit();
  467. }
  468. }
  469. /**
  470. * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
  471. * @rs: The reservation to remove
  472. *
  473. */
  474. void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
  475. {
  476. struct gfs2_rgrpd *rgd;
  477. rgd = rs->rs_rbm.rgd;
  478. if (rgd) {
  479. spin_lock(&rgd->rd_rsspin);
  480. __rs_deltree(ip, rs);
  481. spin_unlock(&rgd->rd_rsspin);
  482. }
  483. }
  484. /**
  485. * gfs2_rs_delete - delete a multi-block reservation
  486. * @ip: The inode for this reservation
  487. *
  488. */
  489. void gfs2_rs_delete(struct gfs2_inode *ip)
  490. {
  491. down_write(&ip->i_rw_mutex);
  492. if (ip->i_res) {
  493. gfs2_rs_deltree(ip, ip->i_res);
  494. trace_gfs2_rs(ip, ip->i_res, TRACE_RS_DELETE);
  495. BUG_ON(ip->i_res->rs_free);
  496. kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
  497. ip->i_res = NULL;
  498. }
  499. up_write(&ip->i_rw_mutex);
  500. }
  501. /**
  502. * return_all_reservations - return all reserved blocks back to the rgrp.
  503. * @rgd: the rgrp that needs its space back
  504. *
  505. * We previously reserved a bunch of blocks for allocation. Now we need to
  506. * give them back. This leaves the reservation structures intact, but removes
  507. * all of their corresponding "no-fly zones".
  508. */
  509. static void return_all_reservations(struct gfs2_rgrpd *rgd)
  510. {
  511. struct rb_node *n;
  512. struct gfs2_blkreserv *rs;
  513. spin_lock(&rgd->rd_rsspin);
  514. while ((n = rb_first(&rgd->rd_rstree))) {
  515. rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
  516. __rs_deltree(NULL, rs);
  517. }
  518. spin_unlock(&rgd->rd_rsspin);
  519. }
  520. void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
  521. {
  522. struct rb_node *n;
  523. struct gfs2_rgrpd *rgd;
  524. struct gfs2_glock *gl;
  525. while ((n = rb_first(&sdp->sd_rindex_tree))) {
  526. rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
  527. gl = rgd->rd_gl;
  528. rb_erase(n, &sdp->sd_rindex_tree);
  529. if (gl) {
  530. spin_lock(&gl->gl_spin);
  531. gl->gl_object = NULL;
  532. spin_unlock(&gl->gl_spin);
  533. gfs2_glock_add_to_lru(gl);
  534. gfs2_glock_put(gl);
  535. }
  536. gfs2_free_clones(rgd);
  537. kfree(rgd->rd_bits);
  538. return_all_reservations(rgd);
  539. kmem_cache_free(gfs2_rgrpd_cachep, rgd);
  540. }
  541. }
  542. static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
  543. {
  544. printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
  545. printk(KERN_INFO " ri_length = %u\n", rgd->rd_length);
  546. printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
  547. printk(KERN_INFO " ri_data = %u\n", rgd->rd_data);
  548. printk(KERN_INFO " ri_bitbytes = %u\n", rgd->rd_bitbytes);
  549. }
  550. /**
  551. * compute_bitstructs - Compute the bitmap sizes
  552. * @rgd: The resource group descriptor
  553. *
  554. * Calculates bitmap descriptors, one for each block that contains bitmap data
  555. *
  556. * Returns: errno
  557. */
  558. static int compute_bitstructs(struct gfs2_rgrpd *rgd)
  559. {
  560. struct gfs2_sbd *sdp = rgd->rd_sbd;
  561. struct gfs2_bitmap *bi;
  562. u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
  563. u32 bytes_left, bytes;
  564. int x;
  565. if (!length)
  566. return -EINVAL;
  567. rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
  568. if (!rgd->rd_bits)
  569. return -ENOMEM;
  570. bytes_left = rgd->rd_bitbytes;
  571. for (x = 0; x < length; x++) {
  572. bi = rgd->rd_bits + x;
  573. bi->bi_flags = 0;
  574. /* small rgrp; bitmap stored completely in header block */
  575. if (length == 1) {
  576. bytes = bytes_left;
  577. bi->bi_offset = sizeof(struct gfs2_rgrp);
  578. bi->bi_start = 0;
  579. bi->bi_len = bytes;
  580. /* header block */
  581. } else if (x == 0) {
  582. bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
  583. bi->bi_offset = sizeof(struct gfs2_rgrp);
  584. bi->bi_start = 0;
  585. bi->bi_len = bytes;
  586. /* last block */
  587. } else if (x + 1 == length) {
  588. bytes = bytes_left;
  589. bi->bi_offset = sizeof(struct gfs2_meta_header);
  590. bi->bi_start = rgd->rd_bitbytes - bytes_left;
  591. bi->bi_len = bytes;
  592. /* other blocks */
  593. } else {
  594. bytes = sdp->sd_sb.sb_bsize -
  595. sizeof(struct gfs2_meta_header);
  596. bi->bi_offset = sizeof(struct gfs2_meta_header);
  597. bi->bi_start = rgd->rd_bitbytes - bytes_left;
  598. bi->bi_len = bytes;
  599. }
  600. bytes_left -= bytes;
  601. }
  602. if (bytes_left) {
  603. gfs2_consist_rgrpd(rgd);
  604. return -EIO;
  605. }
  606. bi = rgd->rd_bits + (length - 1);
  607. if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
  608. if (gfs2_consist_rgrpd(rgd)) {
  609. gfs2_rindex_print(rgd);
  610. fs_err(sdp, "start=%u len=%u offset=%u\n",
  611. bi->bi_start, bi->bi_len, bi->bi_offset);
  612. }
  613. return -EIO;
  614. }
  615. return 0;
  616. }
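/*
 * Editor's note: the layout computed above is: the rgrp header block
 * contributes sb_bsize - sizeof(struct gfs2_rgrp) bytes of bitmap, each
 * following block contributes sb_bsize - sizeof(struct gfs2_meta_header)
 * bytes, and the last block takes whatever of rd_bitbytes is left (a
 * single-block rgrp keeps the whole bitmap in its header block). Since
 * each bitmap byte describes GFS2_NBBY (4) blocks, the check above
 * requires (bi_start + bi_len) * GFS2_NBBY to equal rd_data.
 */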
  617. /**
  618. * gfs2_ri_total - Total up the file system space, according to the rindex.
  619. * @sdp: the filesystem
  620. *
  621. */
  622. u64 gfs2_ri_total(struct gfs2_sbd *sdp)
  623. {
  624. u64 total_data = 0;
  625. struct inode *inode = sdp->sd_rindex;
  626. struct gfs2_inode *ip = GFS2_I(inode);
  627. char buf[sizeof(struct gfs2_rindex)];
  628. int error, rgrps;
  629. for (rgrps = 0;; rgrps++) {
  630. loff_t pos = rgrps * sizeof(struct gfs2_rindex);
  631. if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
  632. break;
  633. error = gfs2_internal_read(ip, buf, &pos,
  634. sizeof(struct gfs2_rindex));
  635. if (error != sizeof(struct gfs2_rindex))
  636. break;
  637. total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
  638. }
  639. return total_data;
  640. }
  641. static int rgd_insert(struct gfs2_rgrpd *rgd)
  642. {
  643. struct gfs2_sbd *sdp = rgd->rd_sbd;
  644. struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
  645. /* Figure out where to put new node */
  646. while (*newn) {
  647. struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
  648. rd_node);
  649. parent = *newn;
  650. if (rgd->rd_addr < cur->rd_addr)
  651. newn = &((*newn)->rb_left);
  652. else if (rgd->rd_addr > cur->rd_addr)
  653. newn = &((*newn)->rb_right);
  654. else
  655. return -EEXIST;
  656. }
  657. rb_link_node(&rgd->rd_node, parent, newn);
  658. rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
  659. sdp->sd_rgrps++;
  660. return 0;
  661. }
  662. /**
  663. * read_rindex_entry - Pull in a new resource index entry from the disk
  664. * @ip: Pointer to the rindex inode
  665. *
  666. * Returns: 0 on success, > 0 on EOF, error code otherwise
  667. */
  668. static int read_rindex_entry(struct gfs2_inode *ip)
  669. {
  670. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  671. loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
  672. struct gfs2_rindex buf;
  673. int error;
  674. struct gfs2_rgrpd *rgd;
  675. if (pos >= i_size_read(&ip->i_inode))
  676. return 1;
  677. error = gfs2_internal_read(ip, (char *)&buf, &pos,
  678. sizeof(struct gfs2_rindex));
  679. if (error != sizeof(struct gfs2_rindex))
  680. return (error == 0) ? 1 : error;
  681. rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
  682. error = -ENOMEM;
  683. if (!rgd)
  684. return error;
  685. rgd->rd_sbd = sdp;
  686. rgd->rd_addr = be64_to_cpu(buf.ri_addr);
  687. rgd->rd_length = be32_to_cpu(buf.ri_length);
  688. rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
  689. rgd->rd_data = be32_to_cpu(buf.ri_data);
  690. rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
  691. spin_lock_init(&rgd->rd_rsspin);
  692. error = compute_bitstructs(rgd);
  693. if (error)
  694. goto fail;
  695. error = gfs2_glock_get(sdp, rgd->rd_addr,
  696. &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
  697. if (error)
  698. goto fail;
  699. rgd->rd_gl->gl_object = rgd;
  700. rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
  701. rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
  702. if (rgd->rd_data > sdp->sd_max_rg_data)
  703. sdp->sd_max_rg_data = rgd->rd_data;
  704. spin_lock(&sdp->sd_rindex_spin);
  705. error = rgd_insert(rgd);
  706. spin_unlock(&sdp->sd_rindex_spin);
  707. if (!error)
  708. return 0;
  709. error = 0; /* someone else read in the rgrp; free it and ignore it */
  710. gfs2_glock_put(rgd->rd_gl);
  711. fail:
  712. kfree(rgd->rd_bits);
  713. kmem_cache_free(gfs2_rgrpd_cachep, rgd);
  714. return error;
  715. }
  716. /**
  717. * gfs2_ri_update - Pull in a new resource index from the disk
  718. * @ip: pointer to the rindex inode
  719. *
  720. * Returns: 0 on successful update, error code otherwise
  721. */
  722. static int gfs2_ri_update(struct gfs2_inode *ip)
  723. {
  724. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  725. int error;
  726. do {
  727. error = read_rindex_entry(ip);
  728. } while (error == 0);
  729. if (error < 0)
  730. return error;
  731. sdp->sd_rindex_uptodate = 1;
  732. return 0;
  733. }
  734. /**
  735. * gfs2_rindex_update - Update the rindex if required
  736. * @sdp: The GFS2 superblock
  737. *
  738. * We grab a lock on the rindex inode to make sure that it doesn't
  739. * change whilst we are performing an operation. We keep this lock
  740. * for quite long periods of time compared to other locks. This
  741. * doesn't matter, since it is shared and it is very, very rarely
  742. * accessed in the exclusive mode (i.e. only when expanding the filesystem).
  743. *
  744. * This makes sure that we're using the latest copy of the resource index
  745. * special file, which might have been updated if someone expanded the
  746. * filesystem (via gfs2_grow utility), which adds new resource groups.
  747. *
  748. * Returns: 0 on success, error code otherwise
  749. */
  750. int gfs2_rindex_update(struct gfs2_sbd *sdp)
  751. {
  752. struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
  753. struct gfs2_glock *gl = ip->i_gl;
  754. struct gfs2_holder ri_gh;
  755. int error = 0;
  756. int unlock_required = 0;
  757. /* Read new copy from disk if we don't have the latest */
  758. if (!sdp->sd_rindex_uptodate) {
  759. if (!gfs2_glock_is_locked_by_me(gl)) {
  760. error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
  761. if (error)
  762. return error;
  763. unlock_required = 1;
  764. }
  765. if (!sdp->sd_rindex_uptodate)
  766. error = gfs2_ri_update(ip);
  767. if (unlock_required)
  768. gfs2_glock_dq_uninit(&ri_gh);
  769. }
  770. return error;
  771. }
  772. static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
  773. {
  774. const struct gfs2_rgrp *str = buf;
  775. u32 rg_flags;
  776. rg_flags = be32_to_cpu(str->rg_flags);
  777. rg_flags &= ~GFS2_RDF_MASK;
  778. rgd->rd_flags &= GFS2_RDF_MASK;
  779. rgd->rd_flags |= rg_flags;
  780. rgd->rd_free = be32_to_cpu(str->rg_free);
  781. rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
  782. rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
  783. }
  784. static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
  785. {
  786. struct gfs2_rgrp *str = buf;
  787. str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
  788. str->rg_free = cpu_to_be32(rgd->rd_free);
  789. str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
  790. str->__pad = cpu_to_be32(0);
  791. str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
  792. memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
  793. }
  794. static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
  795. {
  796. struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
  797. struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
  798. if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
  799. rgl->rl_dinodes != str->rg_dinodes ||
  800. rgl->rl_igeneration != str->rg_igeneration)
  801. return 0;
  802. return 1;
  803. }
  804. static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
  805. {
  806. const struct gfs2_rgrp *str = buf;
  807. rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
  808. rgl->rl_flags = str->rg_flags;
  809. rgl->rl_free = str->rg_free;
  810. rgl->rl_dinodes = str->rg_dinodes;
  811. rgl->rl_igeneration = str->rg_igeneration;
  812. rgl->__pad = 0UL;
  813. }
  814. static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
  815. {
  816. struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
  817. u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
  818. rgl->rl_unlinked = cpu_to_be32(unlinked);
  819. }
  820. static u32 count_unlinked(struct gfs2_rgrpd *rgd)
  821. {
  822. struct gfs2_bitmap *bi;
  823. const u32 length = rgd->rd_length;
  824. const u8 *buffer = NULL;
  825. u32 i, goal, count = 0;
  826. for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
  827. goal = 0;
  828. buffer = bi->bi_bh->b_data + bi->bi_offset;
  829. WARN_ON(!buffer_uptodate(bi->bi_bh));
  830. while (goal < bi->bi_len * GFS2_NBBY) {
  831. goal = gfs2_bitfit(buffer, bi->bi_len, goal,
  832. GFS2_BLKST_UNLINKED);
  833. if (goal == BFITNOENT)
  834. break;
  835. count++;
  836. goal++;
  837. }
  838. }
  839. return count;
  840. }
  841. /**
  842. * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
  843. * @rgd: the struct gfs2_rgrpd describing the RG to read in
  844. *
  845. * Read in all of a Resource Group's header and bitmap blocks.
  846. * Caller must eventually call gfs2_rgrp_go_unlock() to release the bitmaps.
  847. *
  848. * Returns: errno
  849. */
  850. int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
  851. {
  852. struct gfs2_sbd *sdp = rgd->rd_sbd;
  853. struct gfs2_glock *gl = rgd->rd_gl;
  854. unsigned int length = rgd->rd_length;
  855. struct gfs2_bitmap *bi;
  856. unsigned int x, y;
  857. int error;
  858. if (rgd->rd_bits[0].bi_bh != NULL)
  859. return 0;
  860. for (x = 0; x < length; x++) {
  861. bi = rgd->rd_bits + x;
  862. error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
  863. if (error)
  864. goto fail;
  865. }
  866. for (y = length; y--;) {
  867. bi = rgd->rd_bits + y;
  868. error = gfs2_meta_wait(sdp, bi->bi_bh);
  869. if (error)
  870. goto fail;
  871. if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
  872. GFS2_METATYPE_RG)) {
  873. error = -EIO;
  874. goto fail;
  875. }
  876. }
  877. if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
  878. for (x = 0; x < length; x++)
  879. clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
  880. gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
  881. rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
  882. rgd->rd_free_clone = rgd->rd_free;
  883. }
  884. if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
  885. rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
  886. gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
  887. rgd->rd_bits[0].bi_bh->b_data);
  888. }
  889. else if (sdp->sd_args.ar_rgrplvb) {
  890. if (!gfs2_rgrp_lvb_valid(rgd)){
  891. gfs2_consist_rgrpd(rgd);
  892. error = -EIO;
  893. goto fail;
  894. }
  895. if (rgd->rd_rgl->rl_unlinked == 0)
  896. rgd->rd_flags &= ~GFS2_RDF_CHECK;
  897. }
  898. return 0;
  899. fail:
  900. while (x--) {
  901. bi = rgd->rd_bits + x;
  902. brelse(bi->bi_bh);
  903. bi->bi_bh = NULL;
  904. gfs2_assert_warn(sdp, !bi->bi_clone);
  905. }
  906. return error;
  907. }
  908. int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
  909. {
  910. u32 rl_flags;
  911. if (rgd->rd_flags & GFS2_RDF_UPTODATE)
  912. return 0;
  913. if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
  914. return gfs2_rgrp_bh_get(rgd);
  915. rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
  916. rl_flags &= ~GFS2_RDF_MASK;
  917. rgd->rd_flags &= GFS2_RDF_MASK;
  918. rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
  919. if (rgd->rd_rgl->rl_unlinked == 0)
  920. rgd->rd_flags &= ~GFS2_RDF_CHECK;
  921. rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
  922. rgd->rd_free_clone = rgd->rd_free;
  923. rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
  924. rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
  925. return 0;
  926. }
  927. int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
  928. {
  929. struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
  930. struct gfs2_sbd *sdp = rgd->rd_sbd;
  931. if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
  932. return 0;
  933. return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
  934. }
  935. /**
  936. * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
  937. * @gh: The glock holder for the resource group
  938. *
  939. */
  940. void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
  941. {
  942. struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
  943. int x, length = rgd->rd_length;
  944. for (x = 0; x < length; x++) {
  945. struct gfs2_bitmap *bi = rgd->rd_bits + x;
  946. if (bi->bi_bh) {
  947. brelse(bi->bi_bh);
  948. bi->bi_bh = NULL;
  949. }
  950. }
  951. }
  952. int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
  953. struct buffer_head *bh,
  954. const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
  955. {
  956. struct super_block *sb = sdp->sd_vfs;
  957. struct block_device *bdev = sb->s_bdev;
  958. const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
  959. bdev_logical_block_size(sb->s_bdev);
  960. u64 blk;
  961. sector_t start = 0;
  962. sector_t nr_sects = 0;
  963. int rv;
  964. unsigned int x;
  965. u32 trimmed = 0;
  966. u8 diff;
  967. for (x = 0; x < bi->bi_len; x++) {
  968. const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
  969. clone += bi->bi_offset;
  970. clone += x;
  971. if (bh) {
  972. const u8 *orig = bh->b_data + bi->bi_offset + x;
  973. diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
  974. } else {
  975. diff = ~(*clone | (*clone >> 1));
  976. }
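/*
 * Editor's note: "diff &= 0x55" below keeps only the low bit of each
 * two-bit pair, so every set bit in diff names one block eligible for
 * discard; the loop then turns those bits into sector ranges, merging
 * adjacent blocks into a single blkdev_issue_discard() extent.
 */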
  977. diff &= 0x55;
  978. if (diff == 0)
  979. continue;
  980. blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
  981. blk *= sects_per_blk; /* convert to sectors */
  982. while(diff) {
  983. if (diff & 1) {
  984. if (nr_sects == 0)
  985. goto start_new_extent;
  986. if ((start + nr_sects) != blk) {
  987. if (nr_sects >= minlen) {
  988. rv = blkdev_issue_discard(bdev,
  989. start, nr_sects,
  990. GFP_NOFS, 0);
  991. if (rv)
  992. goto fail;
  993. trimmed += nr_sects;
  994. }
  995. nr_sects = 0;
  996. start_new_extent:
  997. start = blk;
  998. }
  999. nr_sects += sects_per_blk;
  1000. }
  1001. diff >>= 2;
  1002. blk += sects_per_blk;
  1003. }
  1004. }
  1005. if (nr_sects >= minlen) {
  1006. rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
  1007. if (rv)
  1008. goto fail;
  1009. trimmed += nr_sects;
  1010. }
  1011. if (ptrimmed)
  1012. *ptrimmed = trimmed;
  1013. return 0;
  1014. fail:
  1015. if (sdp->sd_args.ar_discard)
  1016. fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
  1017. sdp->sd_args.ar_discard = 0;
  1018. return -EIO;
  1019. }
  1020. /**
  1021. * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
  1022. * @filp: Any file on the filesystem
  1023. * @argp: Pointer to the arguments (also used to pass result)
  1024. *
  1025. * Returns: 0 on success, otherwise error code
  1026. */
  1027. int gfs2_fitrim(struct file *filp, void __user *argp)
  1028. {
  1029. struct inode *inode = filp->f_dentry->d_inode;
  1030. struct gfs2_sbd *sdp = GFS2_SB(inode);
  1031. struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
  1032. struct buffer_head *bh;
  1033. struct gfs2_rgrpd *rgd;
  1034. struct gfs2_rgrpd *rgd_end;
  1035. struct gfs2_holder gh;
  1036. struct fstrim_range r;
  1037. int ret = 0;
  1038. u64 amt;
  1039. u64 trimmed = 0;
  1040. unsigned int x;
  1041. if (!capable(CAP_SYS_ADMIN))
  1042. return -EPERM;
  1043. if (!blk_queue_discard(q))
  1044. return -EOPNOTSUPP;
  1045. if (argp == NULL) {
  1046. r.start = 0;
  1047. r.len = ULLONG_MAX;
  1048. r.minlen = 0;
  1049. } else if (copy_from_user(&r, argp, sizeof(r)))
  1050. return -EFAULT;
  1051. ret = gfs2_rindex_update(sdp);
  1052. if (ret)
  1053. return ret;
  1054. rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
  1055. rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);
  1056. while (1) {
  1057. ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
  1058. if (ret)
  1059. goto out;
  1060. if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
  1061. /* Trim each bitmap in the rgrp */
  1062. for (x = 0; x < rgd->rd_length; x++) {
  1063. struct gfs2_bitmap *bi = rgd->rd_bits + x;
  1064. ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, r.minlen, &amt);
  1065. if (ret) {
  1066. gfs2_glock_dq_uninit(&gh);
  1067. goto out;
  1068. }
  1069. trimmed += amt;
  1070. }
  1071. /* Mark rgrp as having been trimmed */
  1072. ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
  1073. if (ret == 0) {
  1074. bh = rgd->rd_bits[0].bi_bh;
  1075. rgd->rd_flags |= GFS2_RGF_TRIMMED;
  1076. gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
  1077. gfs2_rgrp_out(rgd, bh->b_data);
  1078. gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
  1079. gfs2_trans_end(sdp);
  1080. }
  1081. }
  1082. gfs2_glock_dq_uninit(&gh);
  1083. if (rgd == rgd_end)
  1084. break;
  1085. rgd = gfs2_rgrpd_get_next(rgd);
  1086. }
  1087. out:
  1088. r.len = trimmed << 9;
  1089. if (argp && copy_to_user(argp, &r, sizeof(r)))
  1090. return -EFAULT;
  1091. return ret;
  1092. }
  1093. /**
  1094. * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
  1095. * @bi: the bitmap with the blocks
  1096. * @ip: the inode structure
  1097. * @biblk: the 32-bit block number relative to the start of the bitmap
  1098. * @amount: the number of blocks to reserve
  1099. *
  1100. * Returns: NULL - reservation was already taken, so not inserted
  1101. * pointer to the inserted reservation
  1102. */
  1103. static struct gfs2_blkreserv *rs_insert(struct gfs2_bitmap *bi,
  1104. struct gfs2_inode *ip, u32 biblk,
  1105. int amount)
  1106. {
  1107. struct rb_node **newn, *parent = NULL;
  1108. int rc;
  1109. struct gfs2_blkreserv *rs = ip->i_res;
  1110. struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
  1111. u64 fsblock = gfs2_bi2rgd_blk(bi, biblk) + rgd->rd_data0;
  1112. spin_lock(&rgd->rd_rsspin);
  1113. newn = &rgd->rd_rstree.rb_node;
  1114. BUG_ON(!ip->i_res);
  1115. BUG_ON(gfs2_rs_active(rs));
  1116. /* Figure out where to put new node */
  1117. /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/
  1118. while (*newn) {
  1119. struct gfs2_blkreserv *cur =
  1120. rb_entry(*newn, struct gfs2_blkreserv, rs_node);
  1121. parent = *newn;
  1122. rc = rs_cmp(fsblock, amount, cur);
  1123. if (rc > 0)
  1124. newn = &((*newn)->rb_right);
  1125. else if (rc < 0)
  1126. newn = &((*newn)->rb_left);
  1127. else {
  1128. spin_unlock(&rgd->rd_rsspin);
  1129. return NULL; /* reservation already in use */
  1130. }
  1131. }
  1132. /* Do our reservation work */
  1133. rs = ip->i_res;
  1134. rs->rs_free = amount;
  1135. rs->rs_rbm.offset = biblk;
  1136. rs->rs_rbm.bi = bi;
  1137. rb_link_node(&rs->rs_node, parent, newn);
  1138. rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
  1139. /* Do our inode accounting for the reservation */
  1140. /*BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));*/
  1141. /* Do our rgrp accounting for the reservation */
  1142. rgd->rd_reserved += amount; /* blocks reserved */
  1143. rgd->rd_rs_cnt++; /* number of in-tree reservations */
  1144. spin_unlock(&rgd->rd_rsspin);
  1145. trace_gfs2_rs(ip, rs, TRACE_RS_INSERT);
  1146. return rs;
  1147. }
  1148. /**
  1149. * unclaimed_blocks - return the number of free blocks in @rgd not claimed by any reservation
  1150. */
  1151. static u32 unclaimed_blocks(struct gfs2_rgrpd *rgd)
  1152. {
  1153. return rgd->rd_free_clone - rgd->rd_reserved;
  1154. }
  1155. /**
  1156. * rg_mblk_search - find a group of multiple free blocks
  1157. * @rgd: the resource group descriptor
  1158. * @ip: pointer to the inode for which we're reserving blocks
  1159. * @requested: number of blocks we need for the allocation
  1160. *
  1161. * This is very similar to rgblk_search, except we're looking for whole
  1162. * 64-bit words that represent a chunk of 32 free blocks. I'm only focusing
  1163. * on aligned dwords for speed's sake.
  1164. *
  1165. * Returns: 0 if successful or BFITNOENT if there isn't enough free space
  1166. */
  1167. static int rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, unsigned requested)
  1168. {
  1169. struct gfs2_bitmap *bi = rgd->rd_bits;
  1170. const u32 length = rgd->rd_length;
  1171. u32 blk;
  1172. unsigned int buf, x, search_bytes;
  1173. u8 *buffer = NULL;
  1174. u8 *ptr, *end, *nonzero;
  1175. u32 goal, rsv_bytes;
  1176. struct gfs2_blkreserv *rs;
  1177. u32 best_rs_bytes, unclaimed;
  1178. int best_rs_blocks;
  1179. /* Find bitmap block that contains bits for goal block */
  1180. if (rgrp_contains_block(rgd, ip->i_goal))
  1181. goal = ip->i_goal - rgd->rd_data0;
  1182. else
  1183. goal = rgd->rd_last_alloc;
  1184. for (buf = 0; buf < length; buf++) {
  1185. bi = rgd->rd_bits + buf;
  1186. /* Convert scope of "goal" from rgrp-wide to within
  1187. found bit block */
  1188. if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
  1189. goal -= bi->bi_start * GFS2_NBBY;
  1190. goto do_search;
  1191. }
  1192. }
  1193. buf = 0;
  1194. goal = 0;
  1195. do_search:
  1196. best_rs_blocks = max_t(int, atomic_read(&ip->i_res->rs_sizehint),
  1197. (RGRP_RSRV_MINBLKS * rgd->rd_length));
  1198. best_rs_bytes = (best_rs_blocks *
  1199. (1 + (RSRV_CONTENTION_FACTOR * rgd->rd_rs_cnt))) /
  1200. GFS2_NBBY; /* 1 + is for our not-yet-created reservation */
  1201. best_rs_bytes = ALIGN(best_rs_bytes, sizeof(u64));
  1202. unclaimed = unclaimed_blocks(rgd);
  1203. if (best_rs_bytes * GFS2_NBBY > unclaimed)
  1204. best_rs_bytes = unclaimed >> GFS2_BIT_SIZE;
  1205. for (x = 0; x <= length; x++) {
  1206. bi = rgd->rd_bits + buf;
  1207. if (test_bit(GBF_FULL, &bi->bi_flags))
  1208. goto skip;
  1209. WARN_ON(!buffer_uptodate(bi->bi_bh));
  1210. if (bi->bi_clone)
  1211. buffer = bi->bi_clone + bi->bi_offset;
  1212. else
  1213. buffer = bi->bi_bh->b_data + bi->bi_offset;
  1214. /* We have to keep the reservations aligned on u64 boundaries
  1215. otherwise we could get situations where a byte can't be
  1216. used because it's after a reservation, but a free bit still
  1217. is within the reservation's area. */
  1218. ptr = buffer + ALIGN(goal >> GFS2_BIT_SIZE, sizeof(u64));
  1219. end = (buffer + bi->bi_len);
  1220. while (ptr < end) {
  1221. rsv_bytes = 0;
  1222. if ((ptr + best_rs_bytes) <= end)
  1223. search_bytes = best_rs_bytes;
  1224. else
  1225. search_bytes = end - ptr;
  1226. BUG_ON(!search_bytes);
  1227. nonzero = memchr_inv(ptr, 0, search_bytes);
  1228. /* If the lot is all zeroes, reserve the whole size. If
  1229. there's enough zeroes to satisfy the request, use
  1230. what we can. If there's not enough, keep looking. */
  1231. if (nonzero == NULL)
  1232. rsv_bytes = search_bytes;
  1233. else if ((nonzero - ptr) * GFS2_NBBY >= requested)
  1234. rsv_bytes = (nonzero - ptr);
  1235. if (rsv_bytes) {
  1236. blk = ((ptr - buffer) * GFS2_NBBY);
  1237. BUG_ON(blk >= bi->bi_len * GFS2_NBBY);
  1238. rs = rs_insert(bi, ip, blk,
  1239. rsv_bytes * GFS2_NBBY);
  1240. if (IS_ERR(rs))
  1241. return PTR_ERR(rs);
  1242. if (rs)
  1243. return 0;
  1244. }
  1245. ptr += ALIGN(search_bytes, sizeof(u64));
  1246. }
  1247. skip:
  1248. /* Try next bitmap block (wrap back to rgrp header
  1249. if at end) */
  1250. buf++;
  1251. buf %= length;
  1252. goal = 0;
  1253. }
  1254. return BFITNOENT;
  1255. }
  1256. /**
  1257. * try_rgrp_fit - See if a given reservation will fit in a given RG
  1258. * @rgd: the RG data
  1259. * @ip: the inode
  1260. *
  1261. * If there's room for the requested blocks to be allocated from the RG:
  1262. * This will try to get a multi-block reservation first, and if that doesn't
  1263. * fit, it will take what it can.
  1264. *
  1265. * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
  1266. */
  1267. static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
  1268. unsigned requested)
  1269. {
  1270. if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
  1271. return 0;
  1272. /* Look for a multi-block reservation. */
  1273. if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS &&
  1274. rg_mblk_search(rgd, ip, requested) != BFITNOENT)
  1275. return 1;
  1276. if (unclaimed_blocks(rgd) >= requested)
  1277. return 1;
  1278. return 0;
  1279. }
  1280. /**
  1281. * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
  1282. * @rgd: The rgrp
  1283. * @last_unlinked: block address of the last dinode we unlinked
  1284. * @skip: block address we should explicitly not unlink
  1285. *
  1286. * Returns: nothing; any unlinked, unused inodes that are found have
  1287. * delete work queued for them on the delete workqueue.
  1288. */
  1289. static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
  1290. {
  1291. u32 goal = 0, block;
  1292. u64 no_addr;
  1293. struct gfs2_sbd *sdp = rgd->rd_sbd;
  1294. struct gfs2_glock *gl;
  1295. struct gfs2_inode *ip;
  1296. int error;
  1297. int found = 0;
  1298. struct gfs2_bitmap *bi;
  1299. while (goal < rgd->rd_data) {
  1300. down_write(&sdp->sd_log_flush_lock);
  1301. block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, &bi);
  1302. up_write(&sdp->sd_log_flush_lock);
  1303. if (block == BFITNOENT)
  1304. break;
  1305. block = gfs2_bi2rgd_blk(bi, block);
  1306. /* rgblk_search can return a block < goal, so we need to
  1307. keep it marching forward. */
  1308. no_addr = block + rgd->rd_data0;
  1309. goal = max(block + 1, goal + 1);
  1310. if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
  1311. continue;
  1312. if (no_addr == skip)
  1313. continue;
  1314. *last_unlinked = no_addr;
  1315. error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
  1316. if (error)
  1317. continue;
  1318. /* If the inode is already in cache, we can ignore it here
  1319. * because the existing inode disposal code will deal with
  1320. * it when all refs have gone away. Accessing gl_object like
  1321. * this is not safe in general. Here it is ok because we do
  1322. * not dereference the pointer, and we only need an approx
  1323. * answer to whether it is NULL or not.
  1324. */
  1325. ip = gl->gl_object;
  1326. if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
  1327. gfs2_glock_put(gl);
  1328. else
  1329. found++;
  1330. /* Limit reclaim to sensible number of tasks */
  1331. if (found > NR_CPUS)
  1332. return;
  1333. }
  1334. rgd->rd_flags &= ~GFS2_RDF_CHECK;
  1335. return;
  1336. }
  1337. /**
  1338. * gfs2_inplace_reserve - Reserve space in the filesystem
  1339. * @ip: the inode to reserve space for
  1340. * @requested: the number of blocks to be reserved
  1341. *
  1342. * Returns: errno
  1343. */
  1344. int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
  1345. {
  1346. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  1347. struct gfs2_rgrpd *begin = NULL;
  1348. struct gfs2_blkreserv *rs = ip->i_res;
  1349. int error = 0, rg_locked, flags = LM_FLAG_TRY;
  1350. u64 last_unlinked = NO_BLOCK;
  1351. int loops = 0;
  1352. if (sdp->sd_args.ar_rgrplvb)
  1353. flags |= GL_SKIP;
  1354. if (gfs2_assert_warn(sdp, requested)) {
  1355. error = -EINVAL;
  1356. goto out;
  1357. }
  1358. if (gfs2_rs_active(rs)) {
  1359. begin = rs->rs_rbm.rgd;
  1360. flags = 0; /* Yoda: Do or do not. There is no try */
  1361. } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
  1362. rs->rs_rbm.rgd = begin = ip->i_rgd;
  1363. } else {
  1364. rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
  1365. }
  1366. if (rs->rs_rbm.rgd == NULL)
  1367. return -EBADSLT;
	while (loops < 3) {
		rg_locked = 0;

		if (gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 1;
			error = 0;
		} else if (!loops && !gfs2_rs_active(rs) &&
			   rs->rs_rbm.rgd->rd_rs_cnt > RGRP_RSRV_MAX_CONTENDERS) {
			/* If the rgrp already is maxed out for contenders,
			   we can eliminate it as a "first pass" without even
			   requesting the rgrp glock. */
			error = GLR_TRYFAILED;
		} else {
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &rs->rs_rgd_gh);
			if (!error && sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (error) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
					return error;
				}
			}
		}
		switch (error) {
		case 0:
			if (gfs2_rs_active(rs)) {
				if (unclaimed_blocks(rs->rs_rbm.rgd) +
				    rs->rs_free >= requested) {
					ip->i_rgd = rs->rs_rbm.rgd;
					return 0;
				}
				/* We have a multi-block reservation, but the
				   rgrp doesn't have enough free blocks to
				   satisfy the request. Free the reservation
				   and look for a suitable rgrp. */
				gfs2_rs_deltree(ip, rs);
			}
			if (try_rgrp_fit(rs->rs_rbm.rgd, ip, requested)) {
				if (sdp->sd_args.ar_rgrplvb)
					gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
				ip->i_rgd = rs->rs_rbm.rgd;
				return 0;
			}
			if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK) {
				if (sdp->sd_args.ar_rgrplvb)
					gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
				try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
						ip->i_no_addr);
			}
			if (!rg_locked)
				gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
			/* fall through */
		case GLR_TRYFAILED:
			rs->rs_rbm.rgd = gfs2_rgrpd_get_next(rs->rs_rbm.rgd);
			rs->rs_rbm.rgd = rs->rs_rbm.rgd ? : begin; /* if NULL, wrap */
			if (rs->rs_rbm.rgd != begin) /* If we didn't wrap */
				break;

			flags &= ~LM_FLAG_TRY;
			loops++;
			/* Check that fs hasn't grown if writing to rindex */
			if (ip == GFS2_I(sdp->sd_rindex) &&
			    !sdp->sd_rindex_uptodate) {
				error = gfs2_ri_update(ip);
				if (error)
					goto out;
			} else if (loops == 2)
				/* Flushing the log may release space */
				gfs2_log_flush(sdp, NULL);
			break;
		default:
			goto out;
		}
	}
	error = -ENOSPC;

out:
	return error;
}

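/*
 * Note on usage (a rough sketch of the caller pattern, not taken from this
 * file): block-allocating paths call gfs2_inplace_reserve() before starting
 * their transaction, allocate with gfs2_alloc_blocks() below, and then call
 * gfs2_inplace_release() to drop the rgrp glock holder once they are done.
 */
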
/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs = ip->i_res;

	if (rs->rs_rgd_gh.gh_gl)
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}

/**
 * gfs2_get_block_type - Determine the type of a block within an RG
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */
static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_block, buf_block;
	unsigned int buf;
	unsigned char type;

	length = rgd->rd_length;
	rgrp_block = block - rgd->rd_data0;

	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}
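
	/* Each bitmap byte encodes GFS2_NBBY (four) 2-bit block states, so
	   bi_start and bi_len (both in bytes) are scaled by GFS2_NBBY when
	   converting the rgrp-relative block number into a bitmap offset. */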
	gfs2_assert(rgd->rd_sbd, buf < length);

	buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;
	type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_block);

	return type;
}

/**
 * rgblk_search - find a block in @state
 * @rgd: the resource group descriptor
 * @goal: the goal block within the RG (start here to search for avail block)
 * @state: GFS2_BLKST_XXX the before-allocation state to find
 * @rbi: address of the pointer to the bitmap containing the block found
 *
 * Walk rgrp's bitmap to find bits that represent a block in @state.
 *
 * This function never fails, because we wouldn't call it unless we
 * know (from reservation results, etc.) that a block is available.
 *
 * Scope of @goal is just within rgrp, not the whole filesystem.
 * Scope of @returned block is just within bitmap, not the whole filesystem.
 *
 * Returns: the block number found relative to the bitmap rbi
 */
static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, unsigned char state,
			struct gfs2_bitmap **rbi)
{
	struct gfs2_bitmap *bi = NULL;
	const u32 length = rgd->rd_length;
	u32 biblk = BFITNOENT;
	unsigned int buf, x;
	const u8 *buffer = NULL;

	*rbi = NULL;
	/* Find bitmap block that contains bits for goal block */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		/* Convert scope of "goal" from rgrp-wide to within found bit block */
		if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
			goal -= bi->bi_start * GFS2_NBBY;
			goto do_search;
		}
	}
	buf = 0;
	goal = 0;

do_search:
	/* Search (up to entire) bitmap in this rgrp for allocatable block.
	   "x <= length", instead of "x < length", because we typically start
	   the search in the middle of a bit block, but if we can't find an
	   allocatable block anywhere else, we want to be able to wrap around
	   and search in the first part of our first-searched bit block. */
	for (x = 0; x <= length; x++) {
		bi = rgd->rd_bits + buf;

		if (test_bit(GBF_FULL, &bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto skip;

		/* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
		   bitmaps, so we must search the originals for that. */
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
			buffer = bi->bi_clone + bi->bi_offset;
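
		/* Scan this bitmap buffer. If a candidate block lands inside
		   somebody's multi-block reservation, skip past the end of
		   that reservation and keep looking. */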
		while (1) {
			struct gfs2_blkreserv *rs;
			u32 rgblk;

			biblk = gfs2_bitfit(buffer, bi->bi_len, goal, state);
			if (biblk == BFITNOENT)
				break;
			/* Check if this block is reserved */
			rgblk = gfs2_bi2rgd_blk(bi, biblk);
			rs = rs_find(rgd, rgblk);
			if (rs == NULL)
				break;

			BUG_ON(rs->rs_rbm.bi != bi);
			biblk = BFITNOENT;
			/* This should jump to the first block after the
			   reservation. */
			goal = rs->rs_rbm.offset + rs->rs_free;
			if (goal >= bi->bi_len * GFS2_NBBY)
				break;
		}
		if (biblk != BFITNOENT)
			break;

		if ((goal == 0) && (state == GFS2_BLKST_FREE))
			set_bit(GBF_FULL, &bi->bi_flags);

		/* Try next bitmap block (wrap back to rgrp header if at end) */
skip:
		buf++;
		buf %= length;
		goal = 0;
	}

	if (biblk != BFITNOENT)
		*rbi = bi;

	return biblk;
}

/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the position in the rgrp at which to start allocating
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length
 *
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to the new allocation state (GFS2_BLKST_DINODE for the
 * first block of a dinode, GFS2_BLKST_USED otherwise).
 * Returns: starting block number of the extent (fs scope)
 */
static u64 gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
			     unsigned int *n)
{
	struct gfs2_rgrpd *rgd = rbm->rgd;
	struct gfs2_bitmap *bi = rbm->bi;
	u32 blk = rbm->offset;
	const unsigned int elen = *n;
	u32 goal, rgblk;
	const u8 *buffer = NULL;
	struct gfs2_blkreserv *rs;

	*n = 0;
	buffer = bi->bi_bh->b_data + bi->bi_offset;
	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
	gfs2_setbit(rgd, bi->bi_clone, bi, blk,
		    dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	(*n)++;
	goal = blk;
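
	/* The first block is claimed; now extend the extent with consecutive
	   free blocks until the requested length is reached, a reservation or
	   non-free block is hit, or the bitmap buffer ends. */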
	while (*n < elen) {
		goal++;
		if (goal >= (bi->bi_len * GFS2_NBBY))
			break;
		rgblk = gfs2_bi2rgd_blk(bi, goal);
		rs = rs_find(rgd, rgblk);
		if (rs) /* Oops, we bumped into someone's reservation */
			break;
		if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
		    GFS2_BLKST_FREE)
			break;
		gfs2_setbit(rgd, bi->bi_clone, bi, goal, GFS2_BLKST_USED);
		(*n)++;
	}
	blk = gfs2_bi2rgd_blk(bi, blk);
	rgd->rd_last_alloc = blk + *n - 1;
	return rgd->rd_data0 + blk;
}

/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s)
 */
static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_blk, buf_blk;
	unsigned int buf;

	rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	length = rgd->rd_length;

	rgrp_blk = bstart - rgd->rd_data0;
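
	/* Walk the run one block at a time. Before the first change to a
	   bitmap buffer, a clone of its current contents is made; allocation
	   searches use the clone when present (see rgblk_search), so blocks
	   freed here are not handed out again until the clone is later
	   discarded. The new state is then set in the real buffer only. */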
	while (blen--) {
		for (buf = 0; buf < length; buf++) {
			bi = rgd->rd_bits + buf;
			if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
				break;
		}

		gfs2_assert(rgd->rd_sbd, buf < length);

		buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
		rgrp_blk++;

		if (!bi->bi_clone) {
			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
					       GFP_NOFS | __GFP_NOFAIL);
			memcpy(bi->bi_clone + bi->bi_offset,
			       bi->bi_bh->b_data + bi->bi_offset,
			       bi->bi_len);
		}
		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		gfs2_setbit(rgd, NULL, bi, buf_blk, new_state);
	}

	return rgd;
}

/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 *
 */

int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;

	if (rgd == NULL)
		return 0;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
		       rgd->rd_reserved);
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		dump_rs(seq, trs);
	}
	spin_unlock(&rgd->rd_rsspin);
	return 0;
}

static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}

/**
 * claim_reserved_blks - Claim previously reserved blocks
 * @ip: the inode that's claiming the reservation
 * @dinode: 1 if this block is a dinode block, otherwise data block
 * @nblocks: desired extent length
 *
 * Lay claim to previously reserved blocks.
 * Returns: Starting block number of the blocks claimed.
 * Sets *nblocks to the actual extent length allocated.
 */
static u64 claim_reserved_blks(struct gfs2_inode *ip, bool dinode,
			       unsigned int *nblocks)
{
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	struct gfs2_bitmap *bi;
	u64 start_block = gfs2_rbm_to_block(&rs->rs_rbm);
	const unsigned int elen = *nblocks;

	bi = rs->rs_rbm.bi;
	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
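
	/* Claim blocks from the front of the reservation: convert each one
	   from free to used (the first may become a dinode), advancing the
	   reservation and dropping the rgrp's reserved-block count as we go. */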
	for (*nblocks = 0; *nblocks < elen && rs->rs_free; (*nblocks)++) {
		if (gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
				 bi->bi_len, rs->rs_rbm.offset) != GFS2_BLKST_FREE)
			break;
		gfs2_setbit(rgd, bi->bi_clone, bi, rs->rs_rbm.offset,
			    dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
		rs->rs_rbm.offset++;
		rs->rs_free--;

		BUG_ON(!rgd->rd_reserved);
		rgd->rd_reserved--;
		dinode = false;
	}

	trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
	if (!rs->rs_free || *nblocks != elen)
		gfs2_rs_deltree(ip, rs);

	return start_block;
}

/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */
int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	unsigned int ndata;
	u32 goal; /* block, within the rgrp scope */
	u64 block; /* block, within the file system scope */
	int error;

	/* If we have a reservation, claim blocks from it. */
	if (gfs2_rs_active(ip->i_res)) {
		BUG_ON(!ip->i_res->rs_free);
		rbm.rgd = ip->i_res->rs_rbm.rgd;
		block = claim_reserved_blks(ip, dinode, nblocks);
		if (*nblocks)
			goto found_blocks;
	}
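
	/* No usable reservation: fall back to searching the rgrp bitmaps
	   directly, starting from the inode's goal block if it lies in this
	   rgrp, otherwise from the rgrp's last allocation point. */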
	if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
		goal = ip->i_goal - rbm.rgd->rd_data0;
	else
		goal = rbm.rgd->rd_last_alloc;

	rbm.offset = rgblk_search(rbm.rgd, goal, GFS2_BLKST_FREE, &rbm.bi);

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (rbm.offset == BFITNOENT) {
		printk(KERN_WARNING "BFITNOENT, nblocks=%u\n", *nblocks);
		printk(KERN_WARNING "FULL=%d\n",
		       test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
		goto rgrp_error;
	}

	block = gfs2_alloc_extent(&rbm, dinode, nblocks);
found_blocks:
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
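
	/* After a sanity check on the free block count, update the rgrp
	   accounting (free blocks, dinode count, inode generation number),
	   journal the rgrp header and refresh its lock value block, then
	   adjust the statfs and quota figures. */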
	if (rbm.rgd->rd_free < *nblocks) {
		printk(KERN_WARNING "nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, 1);

	/*
	 * This needs reviewing to see why we cannot do the quota change
	 * at this point in the dinode case.
	 */
	if (ndata)
		gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
				  ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}

/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 */

void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}

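/**
 * gfs2_unlink_di - Mark an inode's dinode block as unlinked
 * @inode: the inode whose dinode block is being unlinked
 *
 * The block is flagged GFS2_BLKST_UNLINKED rather than freed outright, so
 * that the final deallocation can be carried out later, once the last user
 * of the inode is gone (see try_rgrp_unlink above for the recovery path).
 */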
void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, 1);
}

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
}

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}

/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */
int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	if (gfs2_get_block_type(rgd, no_addr) != type)
		error = -ESTALE;

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}

/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */
void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
		rgd = ip->i_rgd;
	else
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
		return;
	}
	ip->i_rgd = rgd;

	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;
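
	/* Not in the list yet: grow the array in chunks of ten entries if
	   necessary, then append this rgrp. */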
	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *      and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 *
 */
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, 0,
				 &rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @rlist: the list of resource groups
 *
 */
void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;
	}
}