balloc.c

/*
 * linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "group.h"

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}
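
/*
 * Illustrative sketch (editor's addition, not in the original file): how a
 * caller might split an absolute block number into its group and
 * group-relative offset. The local variable names are hypothetical.
 *
 *	ext4_group_t group;
 *	ext4_grpblk_t off;
 *
 *	ext4_get_group_no_and_offset(sb, blocknr, &group, &off);
 *	// Invariant: blocknr == le32_to_cpu(es->s_first_data_block)
 *	//            + group * EXT4_BLOCKS_PER_GROUP(sb) + off
 */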
/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
		ext4_group_t block_group, struct ext4_group_desc *gdp)
{
	unsigned long start;
	int bit, bit_max;
	unsigned free_blocks, group_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (bh) {
		J_ASSERT_BH(bh, buffer_locked(bh));

		/* If checksum is bad mark all blocks used to prevent allocation,
		 * essentially implementing a per-group read-only flag. */
		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
			ext4_error(sb, __FUNCTION__,
				   "Checksum bad for group %lu\n", block_group);
			gdp->bg_free_blocks_count = 0;
			gdp->bg_free_inodes_count = 0;
			gdp->bg_itable_unused = 0;
			memset(bh->b_data, 0xff, sb->s_blocksize);
			return 0;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
	}

	/* Check for superblock and gdt backups in this group */
	bit_max = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (bit_max) {
			bit_max += ext4_bg_num_gdb(sb, block_group);
			bit_max +=
				le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		int group_rel = (block_group -
				 le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
				EXT4_DESC_PER_BLOCK(sb);
		if (group_rel == 0 || group_rel == 1 ||
		    (group_rel == EXT4_DESC_PER_BLOCK(sb) - 1))
			bit_max += 1;
	}

	if (block_group == sbi->s_groups_count - 1) {
		/*
		 * Even though mke2fs always initializes the first and last
		 * group, if some other tool enabled EXT4_BG_BLOCK_UNINIT we
		 * need to make sure we calculate the right free blocks
		 */
		group_blocks = ext4_blocks_count(sbi->s_es) -
			le32_to_cpu(sbi->s_es->s_first_data_block) -
			(EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count - 1));
	} else {
		group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
	}

	free_blocks = group_blocks - bit_max;

	if (bh) {
		for (bit = 0; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		start = block_group * EXT4_BLOCKS_PER_GROUP(sb) +
			le32_to_cpu(sbi->s_es->s_first_data_block);

		/* Set bits for block and inode bitmaps, and inode table */
		ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
		ext4_set_bit(ext4_inode_bitmap(sb, gdp) - start, bh->b_data);
		for (bit = (ext4_inode_table(sb, gdp) - start),
		     bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		/*
		 * Also if the number of blocks within the group is
		 * less than the blocksize * 8 (which is the size
		 * of the bitmap), set the rest of the block bitmap to 1
		 */
		mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
	}

	return free_blocks - sbi->s_itb_per_group - 2;
}
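
/*
 * Illustrative sketch (editor's addition): the free-block count returned
 * above, written out as plain arithmetic. The names stand in for the
 * quantities computed in the function.
 *
 *	free = group_blocks		// blocks in this group
 *	     - bit_max			// sb/gdt backups (+ reserved gdt)
 *	     - itb_per_group		// inode table blocks
 *	     - 2;			// block bitmap + inode bitmap
 */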
/*
 * The free blocks are managed by bitmaps. A file system contains several
 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block. Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block. The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
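
/*
 * Illustrative sketch (editor's addition): in_range() tests membership in
 * an interval given by its first element and length. For example, with
 * first = 100 and len = 8 it accepts blocks 100..107:
 *
 *	in_range(100, 100, 8)	-> true
 *	in_range(107, 100, 8)	-> true
 *	in_range(108, 100, 8)	-> false
 */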
/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
					     ext4_group_t block_group,
					     struct buffer_head ** bh)
{
	unsigned long group_desc;
	unsigned long offset;
	struct ext4_group_desc * desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= sbi->s_groups_count) {
		ext4_error (sb, "ext4_get_group_desc",
			    "block_group >= groups_count - "
			    "block_group = %lu, groups_count = %lu",
			    block_group, sbi->s_groups_count);
		return NULL;
	}
	smp_rmb();

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error (sb, "ext4_get_group_desc",
			    "Group descriptor not loaded - "
			    "block_group = %lu, group_desc = %lu, desc = %lu",
			    block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}
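
/*
 * Illustrative sketch (editor's addition): how a group number is split
 * into a descriptor-block index and a slot within that block. With a
 * hypothetical 4K block size and 64-byte descriptors there are 64
 * descriptors per block, so:
 *
 *	group_desc = block_group >> 6;	// which descriptor block
 *	offset     = block_group & 63;	// slot within that block
 */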
static int ext4_valid_block_bitmap(struct super_block *sb,
				   struct ext4_group_desc *desc,
				   unsigned int block_group,
				   struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t bitmap_blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all, so the bitmap
		 * validation is skipped for those groups; otherwise we
		 * would also have to read the block group where the
		 * bitmaps are located to verify they are set.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether the block bitmap block number is set */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode table block number is set */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
	ext4_error(sb, __FUNCTION__,
		   "Invalid block bitmap - "
		   "block_group = %d, block = %llu",
		   block_group, bitmap_blk);
	return 0;
}
/**
 * read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode bitmaps and the inode table are set in it.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc * desc;
	struct buffer_head * bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, __FUNCTION__,
			   "Cannot read block bitmap - "
			   "block_group = %d, block_bitmap = %llu",
			   (int)block_group, (unsigned long long)bitmap_blk);
		return NULL;
	}
	if (bh_uptodate_or_lock(bh))
		return bh;

	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, __FUNCTION__,
			   "Cannot read block bitmap - "
			   "block_group = %d, block_bitmap = %llu",
			   (int)block_group, (unsigned long long)bitmap_blk);
		return NULL;
	}
	if (!ext4_valid_block_bitmap(sb, desc, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}
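
/*
 * Illustrative sketch (editor's addition): typical use of
 * read_block_bitmap(). The caller owns the buffer reference on success
 * and must drop it with brelse() when done.
 *
 *	struct buffer_head *bitmap_bh;
 *
 *	bitmap_bh = read_block_bitmap(sb, block_group);
 *	if (!bitmap_bh)
 *		return;		// error already reported via ext4_error()
 *	// ... inspect bitmap_bh->b_data ...
 *	brelse(bitmap_bh);
 */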
/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * We use a red-black tree to represent per-filesystem reservation
 * windows.
 *
 */

/**
 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
 * @rb_root:		root of per-filesystem reservation rb tree
 * @verbose:		verbose mode
 * @fn:			function which wishes to dump the reservation map
 *
 * If verbose is turned on, it will print the whole block reservation
 * windows (start, end). Otherwise, it will only print out the "bad" windows,
 * those windows that overlap with their immediate neighbors.
 */
#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
			      const char *fn)
{
	struct rb_node *n;
	struct ext4_reserve_window_node *rsv, *prev;
	int bad;

restart:
	n = rb_first(root);
	bad = 0;
	prev = NULL;

	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
	while (n) {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
		if (verbose)
			printk("reservation window 0x%p "
			       "start: %llu, end: %llu\n",
			       rsv, rsv->rsv_start, rsv->rsv_end);
		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
			printk("Bad reservation %p (start >= end)\n",
			       rsv);
			bad = 1;
		}
		if (prev && prev->rsv_end >= rsv->rsv_start) {
			printk("Bad reservation %p (prev->end >= start)\n",
			       rsv);
			bad = 1;
		}
		if (bad) {
			if (!verbose) {
				printk("Restarting reservation walk in verbose mode\n");
				verbose = 1;
				goto restart;
			}
		}
		n = rb_next(n);
		prev = rsv;
	}
	printk("Window map complete.\n");
	if (bad)
		BUG();
}
#define rsv_window_dump(root, verbose) \
	__rsv_window_dump((root), (verbose), __FUNCTION__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif
/**
 * goal_in_my_reservation()
 * @rsv:		inode's reservation window
 * @grp_goal:		given goal block relative to the allocation block group
 * @group:		the current allocation block group
 * @sb:			filesystem super block
 *
 * Test if the given goal block (group relative) is within the file's
 * own block reservation window range.
 *
 * If the reservation window is outside the goal allocation group, return 0;
 * grp_goal (given goal block) could be -1, which means no specific
 * goal block. In this case, always return 1.
 * If the goal block is within the reservation window, return 1;
 * otherwise, return 0.
 */
static int
goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
		       ext4_group_t group, struct super_block *sb)
{
	ext4_fsblk_t group_first_block, group_last_block;

	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	if ((rsv->_rsv_start > group_last_block) ||
	    (rsv->_rsv_end < group_first_block))
		return 0;
	if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
		|| (grp_goal + group_first_block > rsv->_rsv_end)))
		return 0;
	return 1;
}
/**
 * search_reserve_window()
 * @rb_root:		root of reservation tree
 * @goal:		target allocation block
 *
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext4_reserve_window_node *
search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
{
	struct rb_node *n = root->rb_node;
	struct ext4_reserve_window_node *rsv;

	if (!n)
		return NULL;

	do {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);

		if (goal < rsv->rsv_start)
			n = n->rb_left;
		else if (goal > rsv->rsv_end)
			n = n->rb_right;
		else
			return rsv;
	} while (n);
	/*
	 * We've fallen off the end of the tree: the goal wasn't inside
	 * any particular node. OK, the previous node must be to one
	 * side of the interval containing the goal. If it's the RHS,
	 * we need to back up one.
	 */
	if (rsv->rsv_start > goal) {
		n = rb_prev(&rsv->rsv_node);
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
	}
	return rsv;
}
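
/*
 * Illustrative sketch (editor's addition): what search_reserve_window()
 * returns for a hypothetical tree holding windows [10,19] and [40,49]:
 *
 *	goal = 15 -> the [10,19] node (goal falls inside it)
 *	goal = 25 -> the [10,19] node (nearest window before the goal)
 *	goal =  5 -> NULL (every window starts after the goal)
 */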
/**
 * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
 * @sb:			super block
 * @rsv:		reservation window to add
 *
 * Must be called with rsv_lock held.
 */
void ext4_rsv_window_add(struct super_block *sb,
			 struct ext4_reserve_window_node *rsv)
{
	struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
	struct rb_node *node = &rsv->rsv_node;
	ext4_fsblk_t start = rsv->rsv_start;

	struct rb_node ** p = &root->rb_node;
	struct rb_node * parent = NULL;
	struct ext4_reserve_window_node *this;

	while (*p)
	{
		parent = *p;
		this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);

		if (start < this->rsv_start)
			p = &(*p)->rb_left;
		else if (start > this->rsv_end)
			p = &(*p)->rb_right;
		else {
			rsv_window_dump(root, 1);
			BUG();
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
}
/**
 * ext4_rsv_window_remove() -- unlink a window from the reservation rb tree
 * @sb:			super block
 * @rsv:		reservation window to remove
 *
 * Mark the block reservation window as not allocated, and unlink it
 * from the filesystem reservation window rb tree. Must be called with
 * rsv_lock held.
 */
static void rsv_window_remove(struct super_block *sb,
			      struct ext4_reserve_window_node *rsv)
{
	rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_alloc_hit = 0;
	rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
}

/*
 * rsv_is_empty() -- Check if the reservation window is allocated.
 * @rsv:		given reservation window to check
 *
 * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
 */
static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
{
	/* a valid reservation end block cannot be 0 */
	return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
}
/**
 * ext4_init_block_alloc_info()
 * @inode:		file inode structure
 *
 * Allocate and initialize the reservation window structure, and
 * finally link the window to the ext4 inode structure.
 *
 * The reservation window structure is only dynamically allocated
 * and linked to the ext4 inode the first time the open file
 * needs a new block. So, before every ext4_new_block(s) call, for
 * regular files, we should check whether the reservation window
 * structure exists or not. If it does not, this function is called.
 * Failure to do so will result in block reservation being turned off for
 * that open file.
 *
 * This function is called from ext4_get_blocks_handle(), and also called
 * when setting the reservation window size through ioctl before the file
 * is opened for write (needs block allocation).
 *
 * The caller must hold down_write(i_data_sem) when calling this function.
 */
void ext4_init_block_alloc_info(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct super_block *sb = inode->i_sb;

	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
	if (block_i) {
		struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;

		rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
		rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;

		/*
		 * if the filesystem is mounted with NORESERVATION, the goal
		 * reservation window size is set to zero to indicate
		 * block reservation is off
		 */
		if (!test_opt(sb, RESERVATION))
			rsv->rsv_goal_size = 0;
		else
			rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
		rsv->rsv_alloc_hit = 0;
		block_i->last_alloc_logical_block = 0;
		block_i->last_alloc_physical_block = 0;
	}
	ei->i_block_alloc_info = block_i;
}
/**
 * ext4_discard_reservation()
 * @inode:		inode
 *
 * Discard (free) the block reservation window on last file close, on
 * truncate, or at the last iput().
 *
 * It is called in three cases:
 *	ext4_release_file(): last writer closes the file
 *	ext4_clear_inode(): last iput(), when nobody links to this file.
 *	ext4_truncate(): when the block indirect map is about to change.
 *
 */
void ext4_discard_reservation(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct ext4_reserve_window_node *rsv;
	spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;

	if (!block_i)
		return;

	rsv = &block_i->rsv_window_node;
	if (!rsv_is_empty(&rsv->rsv_window)) {
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&rsv->rsv_window))
			rsv_window_remove(inode->i_sb, rsv);
		spin_unlock(rsv_lock);
	}
}
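
/*
 * Illustrative sketch (editor's addition): the check/lock/re-check pattern
 * used above. The unlocked test avoids taking the per-filesystem rsv_lock
 * in the common case where no window was ever allocated; the test is
 * repeated under the lock because another task may have emptied the
 * window in between.
 *
 *	if (!rsv_is_empty(&rsv->rsv_window)) {		// cheap, racy check
 *		spin_lock(rsv_lock);
 *		if (!rsv_is_empty(&rsv->rsv_window))	// authoritative
 *			rsv_window_remove(inode->i_sb, rsv);
 *		spin_unlock(rsv_lock);
 *	}
 */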
/**
 * ext4_free_blocks_sb() -- Free given blocks and update quota
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physical block to free
 * @count:			number of blocks to free
 * @pdquot_freed_blocks:	pointer to quota
 */
void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count,
			 unsigned long *pdquot_freed_blocks)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	ext4_grpblk_t bit;
	unsigned long i;
	unsigned long overflow;
	struct ext4_group_desc * desc;
	struct ext4_super_block * es;
	struct ext4_sb_info *sbi;
	int err = 0, ret;
	ext4_grpblk_t group_freed;

	*pdquot_freed_blocks = 0;
	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (block < le32_to_cpu(es->s_first_data_block) ||
	    block + count < block ||
	    block + count > ext4_blocks_count(es)) {
		ext4_error (sb, "ext4_free_blocks",
			    "Freeing blocks not in datazone - "
			    "block = %llu, count = %lu", block, count);
		goto error_return;
	}

	ext4_debug ("freeing block(s) %llu-%llu\n", block, block + count - 1);

do_more:
	overflow = 0;
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	brelse(bitmap_bh);
	bitmap_bh = read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext4_get_group_desc (sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group)) {
		ext4_error (sb, "ext4_free_blocks",
			    "Freeing blocks in system zones - "
			    "Block = %llu, count = %lu",
			    block, count);
		goto error_return;
	}

	/*
	 * We are about to start releasing blocks in the bitmap,
	 * so we need undo access.
	 */
	/* @@@ check errors */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata. Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;

	jbd_lock_bh_state(bitmap_bh);

	for (i = 0, group_freed = 0; i < count; i++) {
		/*
		 * An HJ special. This is expensive...
		 */
#ifdef CONFIG_JBD2_DEBUG
		jbd_unlock_bh_state(bitmap_bh);
		{
			struct buffer_head *debug_bh;
			debug_bh = sb_find_get_block(sb, block + i);
			if (debug_bh) {
				BUFFER_TRACE(debug_bh, "Deleted!");
				if (!bh2jh(bitmap_bh)->b_committed_data)
					BUFFER_TRACE(debug_bh,
						"No committed data in bitmap");
				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
				__brelse(debug_bh);
			}
		}
		jbd_lock_bh_state(bitmap_bh);
#endif
		if (need_resched()) {
			jbd_unlock_bh_state(bitmap_bh);
			cond_resched();
			jbd_lock_bh_state(bitmap_bh);
		}

		/* @@@ This prevents newly-allocated data from being
		 * freed and then reallocated within the same
		 * transaction.
		 *
		 * Ideally we would want to allow that to happen, but to
		 * do so requires making jbd2_journal_forget() capable of
		 * revoking the queued write of a data block, which
		 * implies blocking on the journal lock. *forget()
		 * cannot block due to truncate races.
		 *
		 * Eventually we can fix this by making jbd2_journal_forget()
		 * return a status indicating whether or not it was able
		 * to revoke the buffer. On successful revoke, it is
		 * safe not to set the allocation bit in the committed
		 * bitmap, because we know that there is no outstanding
		 * activity on the buffer any more and so it is safe to
		 * reallocate it.
		 */
		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
		J_ASSERT_BH(bitmap_bh,
				bh2jh(bitmap_bh)->b_committed_data != NULL);
		ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
				bh2jh(bitmap_bh)->b_committed_data);

		/*
		 * We clear the bit in the bitmap after setting the committed
		 * data bit, because this is the reverse order to that which
		 * the allocator uses.
		 */
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
						bit + i, bitmap_bh->b_data)) {
			jbd_unlock_bh_state(bitmap_bh);
			ext4_error(sb, __FUNCTION__,
				   "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			jbd_lock_bh_state(bitmap_bh);
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			group_freed++;
		}
	}
	jbd_unlock_bh_state(bitmap_bh);

	spin_lock(sb_bgl_lock(sbi, block_group));
	desc->bg_free_blocks_count =
		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
			group_freed);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_add(&sbi->s_freeblocks_counter, count);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_journal_dirty_metadata(handle, gd_bh);
	if (!err)
		err = ret;
	*pdquot_freed_blocks += group_freed;

	if (overflow && !err) {
		block += count;
		count = overflow;
		goto do_more;
	}
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}
/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:		handle for this transaction
 * @inode:		inode
 * @block:		start physical block to free
 * @count:		number of blocks to free
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
		      ext4_fsblk_t block, unsigned long count)
{
	struct super_block * sb;
	unsigned long dquot_freed_blocks;

	sb = inode->i_sb;
	if (!sb) {
		printk ("ext4_free_blocks: nonexistent device");
		return;
	}
	ext4_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
	if (dquot_freed_blocks)
		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
	return;
}
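
/*
 * Illustrative sketch (editor's addition): freeing a run of blocks from
 * inside a transaction. "handle" is assumed to be a running jbd2 handle
 * and the range (block, count) is assumed to belong to "inode".
 *
 *	ext4_free_blocks(handle, inode, block, count);
 *	// quota is credited inside ext4_free_blocks() via
 *	// DQUOT_FREE_BLOCK() once the bitmap updates succeed
 */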
/**
 * ext4_test_allocatable()
 * @nr:			given block (group relative) to test
 * @bh:			bufferhead containing the bitmap of the given block group
 *
 * For ext4 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy. This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
{
	int ret;
	struct journal_head *jh = bh2jh(bh);

	if (ext4_test_bit(nr, bh->b_data))
		return 0;

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data)
		ret = 1;
	else
		ret = !ext4_test_bit(nr, jh->b_committed_data);
	jbd_unlock_bh_state(bh);
	return ret;
}
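
/*
 * Illustrative sketch (editor's addition): the rule enforced above. A
 * block is allocatable only if it is free in BOTH copies:
 *
 *	on-disk bitmap	committed copy		allocatable?
 *	--------------	--------------------	------------
 *	used		-			no
 *	free		(no committed copy)	yes
 *	free		used			no
 *	free		free			yes
 */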
/**
 * bitmap_search_next_usable_block()
 * @start:		the starting block (group relative) of the search
 * @bh:			bufferhead containing the block group bitmap
 * @maxblocks:		the ending block (group relative) of the reservation
 *
 * The bitmap search --- search forward alternately through the actual
 * bitmap on disk and the last-committed copy in the journal, until we
 * find a bit free in both bitmaps.
 */
static ext4_grpblk_t
bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
				ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t next;
	struct journal_head *jh = bh2jh(bh);

	while (start < maxblocks) {
		next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
		if (next >= maxblocks)
			return -1;
		if (ext4_test_allocatable(next, bh))
			return next;
		jbd_lock_bh_state(bh);
		if (jh->b_committed_data)
			start = ext4_find_next_zero_bit(jh->b_committed_data,
							maxblocks, next);
		jbd_unlock_bh_state(bh);
	}
	return -1;
}
/**
 * find_next_usable_block()
 * @start:		the starting block (group relative) from which to
 *			find the next allocatable block in the bitmap.
 * @bh:			bufferhead containing the block group bitmap
 * @maxblocks:		the ending block (group relative) for the search
 *
 * Find an allocatable block in a bitmap. We honor both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext4_grpblk_t
find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
		       ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t here, next;
	char *p, *r;

	if (start > 0) {
		/*
		 * The goal was occupied; search forward for a free
		 * block within the next XX blocks.
		 *
		 * end_goal is more or less random, but it has to be
		 * less than EXT4_BLOCKS_PER_GROUP. Aligning up to the
		 * next 64-bit boundary is simple.
		 */
		ext4_grpblk_t end_goal = (start + 63) & ~63;
		if (end_goal > maxblocks)
			end_goal = maxblocks;
		here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
		if (here < end_goal && ext4_test_allocatable(here, bh))
			return here;
		ext4_debug("Bit not found near goal\n");
	}

	here = start;
	if (here < 0)
		here = 0;

	p = ((char *)bh->b_data) + (here >> 3);
	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
	next = (r - ((char *)bh->b_data)) << 3;

	if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
		return next;

	/*
	 * The bitmap search --- search forward alternately through the actual
	 * bitmap and the last-committed copy until we find a bit free in
	 * both
	 */
	here = bitmap_search_next_usable_block(here, bh, maxblocks);
	return here;
}
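
/*
 * Illustrative sketch (editor's addition): the three phases of the search
 * above, in order of preference:
 *
 *	1. near the goal: scan forward up to the next 64-bit boundary;
 *	2. a fully free byte: memscan() for a 0x00 bitmap byte, i.e. a run
 *	   of 8 free blocks, anywhere from the goal onward;
 *	3. any free bit that is also clear in the committed copy, via
 *	   bitmap_search_next_usable_block().
 */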
/**
 * claim_block()
 * @block:		the free block (group relative) to allocate
 * @bh:			the bufferhead containing the block group bitmap
 *
 * We think we can allocate this block in this bitmap. Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data. If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);
	int ret;

	if (ext4_set_bit_atomic(lock, block, bh->b_data))
		return 0;
	jbd_lock_bh_state(bh);
	if (jh->b_committed_data && ext4_test_bit(block, jh->b_committed_data)) {
		ext4_clear_bit_atomic(lock, block, bh->b_data);
		ret = 0;
	} else {
		ret = 1;
	}
	jbd_unlock_bh_state(bh);
	return ret;
}
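
/*
 * Illustrative sketch (editor's addition): the outcomes of claim_block().
 * Between the caller's ext4_test_allocatable() check and the atomic set,
 * another CPU may allocate the block and free it again; the free path
 * marks the block used in b_committed_data (see ext4_free_blocks_sb()),
 * so re-checking that copy under jbd_lock_bh_state() catches the race:
 *
 *	atomic set of bit in b_data	-> already set? lose, return 0
 *	bit set in b_committed_data?	-> alloc+free raced us, undo, 0
 *	otherwise			-> block is ours, return 1
 */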
/**
 * ext4_try_to_allocate()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holding the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 *
 * Attempt to allocate blocks within a given range. Set the range of the
 * allocation first, then find the first free bit(s) in the bitmap (within
 * the range), and finally allocate the blocks by claiming the found free
 * bit(s) as allocated.
 *
 * To set the range of this allocation:
 *	if there is a reservation window, only try to allocate block(s)
 *	from the file's own reservation window;
 *	otherwise, the allocation range starts from the given goal block
 *	and ends at the block group's last block.
 *
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap. In that case we must release write access to the old one via
 * ext4_journal_release_buffer(), else we'll run out of credits.
 */
static ext4_grpblk_t
ext4_try_to_allocate(struct super_block *sb, handle_t *handle,
		     ext4_group_t group, struct buffer_head *bitmap_bh,
		     ext4_grpblk_t grp_goal, unsigned long *count,
		     struct ext4_reserve_window *my_rsv)
{
	ext4_fsblk_t group_first_block;
	ext4_grpblk_t start, end;
	unsigned long num = 0;

	/* we do allocation within the reservation window if we have a window */
	if (my_rsv) {
		group_first_block = ext4_group_first_block_no(sb, group);
		if (my_rsv->_rsv_start >= group_first_block)
			start = my_rsv->_rsv_start - group_first_block;
		else
			/* reservation window crosses group boundary */
			start = 0;
		end = my_rsv->_rsv_end - group_first_block + 1;
		if (end > EXT4_BLOCKS_PER_GROUP(sb))
			/* reservation window crosses group boundary */
			end = EXT4_BLOCKS_PER_GROUP(sb);
		if ((start <= grp_goal) && (grp_goal < end))
			start = grp_goal;
		else
			grp_goal = -1;
	} else {
		if (grp_goal > 0)
			start = grp_goal;
		else
			start = 0;
		end = EXT4_BLOCKS_PER_GROUP(sb);
	}

	BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));

repeat:
	if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
		grp_goal = find_next_usable_block(start, bitmap_bh, end);
		if (grp_goal < 0)
			goto fail_access;
		if (!my_rsv) {
			int i;

			for (i = 0; i < 7 && grp_goal > start &&
			     ext4_test_allocatable(grp_goal - 1,
						   bitmap_bh);
			     i++, grp_goal--)
				;
		}
	}
	start = grp_goal;

	if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
			 grp_goal, bitmap_bh)) {
		/*
		 * The block was allocated by another thread, or it was
		 * allocated and then freed by another thread
		 */
		start++;
		grp_goal++;
		if (start >= end)
			goto fail_access;
		goto repeat;
	}
	num++;
	grp_goal++;
	while (num < *count && grp_goal < end
	       && ext4_test_allocatable(grp_goal, bitmap_bh)
	       && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
			      grp_goal, bitmap_bh)) {
		num++;
		grp_goal++;
	}
	*count = num;
	return grp_goal - num;

fail_access:
	*count = num;
	return -1;
}
/**
 * find_next_reservable_window():
 *	find a reservable space within the given range.
 *	It does not allocate the reservation window for now:
 *	alloc_new_reservation() will do the work later.
 *
 *	@search_head: the head of the searching list;
 *		This is not necessarily the list head of the whole filesystem.
 *
 *		We have both head and start_block to assist the search
 *		for the reservable space. The list starts from head,
 *		but we will shift to the place where start_block is,
 *		then start from there, when looking for a reservable space.
 *
 *	@size: the target new reservation window size
 *
 *	@group_first_block: the first block we consider starting
 *		the real search from
 *
 *	@last_block:
 *		the maximum block number that our goal reservable space
 *		could start from. This is normally the last block in this
 *		group. The search ends when we find that the start of the
 *		next possible reservable space is beyond this boundary.
 *		This handles the case of a reservation window that crosses
 *		the group boundary.
 *
 *	Basically we search the given range (start_block, last_block),
 *	rather than the whole reservation doubly linked list, to find a
 *	free region that is of my size and has not been reserved.
 *
 */
static int find_next_reservable_window(
				struct ext4_reserve_window_node *search_head,
				struct ext4_reserve_window_node *my_rsv,
				struct super_block * sb,
				ext4_fsblk_t start_block,
				ext4_fsblk_t last_block)
{
	struct rb_node *next;
	struct ext4_reserve_window_node *rsv, *prev;
	ext4_fsblk_t cur;
	int size = my_rsv->rsv_goal_size;

	/* TODO: make the start of the reservation window byte-aligned */
	/* cur = *start_block & ~7;*/
	cur = start_block;
	rsv = search_head;
	if (!rsv)
		return -1;

	while (1) {
		if (cur <= rsv->rsv_end)
			cur = rsv->rsv_end + 1;

		/* TODO?
		 * in the case we could not find a reservable space of the
		 * expected size, during the re-search, we could remember
		 * the largest reservable space we could have had, and
		 * return that one.
		 *
		 * For now it will fail if we could not find the reservable
		 * space with the expected size (or more)...
		 */
		if (cur > last_block)
			return -1;		/* fail */

		prev = rsv;
		next = rb_next(&rsv->rsv_node);
		rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);

		/*
		 * Reached the last reservation, we can just append to the
		 * previous one.
		 */
		if (!next)
			break;

		if (cur + size <= rsv->rsv_start) {
			/*
			 * Found a reservable space big enough. We could
			 * have a reservation across the group boundary here
			 */
			break;
		}
	}
	/*
	 * we come here either:
	 * when we reach the end of the whole list, and there is empty
	 * reservable space after the last entry in the list: append the
	 * new window to the end of the list.
	 *
	 * or we found a reservable space in the middle of the list;
	 * return the reservation window that we could append to.
	 * succeed.
	 */

	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
		rsv_window_remove(sb, my_rsv);

	/*
	 * Let's book the whole available window for now. We will check the
	 * disk bitmap later and then, if there are free blocks then we adjust
	 * the window size if it's larger than requested.
	 * Otherwise, we will remove this node from the tree the next time
	 * find_next_reservable_window() is called.
	 */
	my_rsv->rsv_start = cur;
	my_rsv->rsv_end = cur + size - 1;
	my_rsv->rsv_alloc_hit = 0;

	if (prev != my_rsv)
		ext4_rsv_window_add(sb, my_rsv);

	return 0;
}
/**
 * alloc_new_reservation() -- allocate a new reservation window
 *
 * To make a new reservation, we search part of the filesystem
 * reservation list (the list inside the group). We try to
 * allocate a new reservation window near the allocation goal,
 * or at the beginning of the group, if there is no goal.
 *
 * We first find a reservable space after the goal, then from
 * there, we check the bitmap for the first free block after
 * it. If there is no free block until the end of the group, then the
 * whole group is full and we failed. Otherwise, we check whether the free
 * block is inside the expected reservable space; if so, we succeed.
 * If the first free block is outside the reservable space, then
 * starting from that free block, we search for the next available
 * space, and go on.
 *
 * On success, a new reservation will be found and inserted into the list.
 * It contains at least one free block, and it does not overlap with other
 * reservation windows.
 *
 * On failure, we failed to find a reservation window in this group.
 *
 * @my_rsv: the reservation
 *
 * @grp_goal: The goal (group-relative). It is where the search for a
 *	free reservable space should start from.
 *	If we have a grp_goal (grp_goal >= 0), then start from there;
 *	with no grp_goal (grp_goal = -1), we start from the first block
 *	of the group.
 *
 * @sb: the super block
 * @group: the group we are trying to allocate in
 * @bitmap_bh: the block group block bitmap
 *
 */
static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
		ext4_grpblk_t grp_goal, struct super_block *sb,
		ext4_group_t group, struct buffer_head *bitmap_bh)
{
	struct ext4_reserve_window_node *search_head;
	ext4_fsblk_t group_first_block, group_end_block, start_block;
	ext4_grpblk_t first_free_block;
	struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
	unsigned long size;
	int ret;
	spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

	group_first_block = ext4_group_first_block_no(sb, group);
	group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	if (grp_goal < 0)
		start_block = group_first_block;
	else
		start_block = grp_goal + group_first_block;

	size = my_rsv->rsv_goal_size;

	if (!rsv_is_empty(&my_rsv->rsv_window)) {
		/*
		 * if the old reservation crosses the group boundary,
		 * and if the goal is inside the old reservation window,
		 * we will come here when we just failed to allocate from
		 * the first part of the window. We still have another part
		 * that belongs to the next group. In this case, there is no
		 * point in discarding our window and trying to allocate a
		 * new one in this group (which will fail). We should
		 * keep the reservation window, just simply move on.
		 *
		 * Maybe we could shift the start block of the reservation
		 * window to the first block of the next group.
		 */
		if ((my_rsv->rsv_start <= group_end_block) &&
		    (my_rsv->rsv_end > group_end_block) &&
		    (start_block >= my_rsv->rsv_start))
			return -1;

		if ((my_rsv->rsv_alloc_hit >
		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
			/*
			 * if the previous allocation hit ratio is
			 * greater than 1/2, then we double the size of
			 * the reservation window the next time,
			 * otherwise we keep the same size window
			 */
			size = size * 2;
			if (size > EXT4_MAX_RESERVE_BLOCKS)
				size = EXT4_MAX_RESERVE_BLOCKS;
			my_rsv->rsv_goal_size = size;
		}
	}

	spin_lock(rsv_lock);
	/*
	 * shift the search start to the window near the goal block
	 */
	search_head = search_reserve_window(fs_rsv_root, start_block);

	/*
	 * find_next_reservable_window() simply finds a reservable window
	 * inside the given range (start_block, group_end_block).
	 *
	 * To make sure the reservation window has a free bit inside it, we
	 * need to check the bitmap after we found a reservable window.
	 */
retry:
	ret = find_next_reservable_window(search_head, my_rsv, sb,
					  start_block, group_end_block);

	if (ret == -1) {
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;
	}

	/*
	 * On success, find_next_reservable_window() returns the
	 * reservation window where there is a reservable space after it.
	 * Before we reserve this reservable space, we need
	 * to make sure there is at least a free block inside this region.
	 *
	 * Search the block bitmap and the copy of the last committed
	 * bitmap alternately for the first free bit, until we find an
	 * allocatable block, starting from the start block of the
	 * reservable space we just found.
	 */
	spin_unlock(rsv_lock);
	first_free_block = bitmap_search_next_usable_block(
			my_rsv->rsv_start - group_first_block,
			bitmap_bh, group_end_block - group_first_block + 1);

	if (first_free_block < 0) {
		/*
		 * no free block left on the bitmap, no point
		 * in reserving the space. return failure.
		 */
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;		/* failed */
	}

	start_block = first_free_block + group_first_block;
	/*
	 * check if the first free block is within the
	 * free space we just reserved
	 */
	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
		return 0;		/* success */
	/*
	 * if the first free bit we found is out of the reservable space,
	 * continue the search for the next reservable space,
	 * starting from where the free block is;
	 * we also shift the list head to where we stopped last time
	 */
	search_head = my_rsv;
	spin_lock(rsv_lock);
	goto retry;
}
/**
 * try_to_extend_reservation()
 * @my_rsv:		given reservation window
 * @sb:			super block
 * @size:		the delta to extend
 *
 * Attempt to expand the reservation window enough to hold the
 * required number of free blocks.
 *
 * Since ext4_try_to_allocate() will always allocate blocks within
 * the reservation window range, if the window size is too small,
 * a multiple-block allocation has to stop at the end of the reservation
 * window. To make this more efficient, given the total number of
 * blocks needed and the current size of the window, we try to
 * expand the reservation window size if necessary on a best-effort
 * basis before ext4_new_blocks() tries to allocate blocks.
 */
static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
				      struct super_block *sb, int size)
{
	struct ext4_reserve_window_node *next_rsv;
	struct rb_node *next;
	spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

	if (!spin_trylock(rsv_lock))
		return;

	next = rb_next(&my_rsv->rsv_node);

	if (!next)
		my_rsv->rsv_end += size;
	else {
		next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);

		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
			my_rsv->rsv_end += size;
		else
			my_rsv->rsv_end = next_rsv->rsv_start - 1;
	}
	spin_unlock(rsv_lock);
}
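
/*
 * Illustrative sketch (editor's addition): the extension cases above, for
 * a window ending at block E asked to grow by "size":
 *
 *	no next window			-> rsv_end = E + size
 *	gap to next window >= size	-> rsv_end = E + size
 *	gap smaller than size		-> rsv_end = next->rsv_start - 1
 *					   (take whatever gap exists)
 */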
/**
 * ext4_try_to_allocate_with_rsv()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 * @errp:		pointer to store the error code
 *
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, we first try to allocate from
 * the inode's own reservation.  If the inode does not have a reservation
 * window, then rather than scanning the bitmap for a free bit first and only
 * then checking the reservation list to see whether that bit falls inside
 * somebody else's reservation window, we try to allocate a reservation window
 * for the inode, starting from the goal, and then do the block allocation
 * within that reservation window.
 *
 * This avoids searching the reservation list again and again when somebody is
 * looking for a free block (without a reservation) and there are lots of free
 * blocks, but they are all reserved.
 *
 * We use a red-black tree for the per-filesystem reservation list.
 */
static ext4_grpblk_t
ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
			ext4_group_t group, struct buffer_head *bitmap_bh,
			ext4_grpblk_t grp_goal,
			struct ext4_reserve_window_node *my_rsv,
			unsigned long *count, int *errp)
{
	ext4_fsblk_t group_first_block, group_last_block;
	ext4_grpblk_t ret = 0;
	int fatal;
	unsigned long num = *count;

	*errp = 0;

	/*
	 * Make sure we use undo access for the bitmap, because it is critical
	 * that we do the frozen_data COW on bitmap buffers in all cases even
	 * if the buffer is in BJ_Forget state in the committing transaction.
	 */
	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
	fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (fatal) {
		*errp = fatal;
		return -1;
	}

	/*
	 * We don't deal with reservations when
	 * the filesystem is mounted without reservations, or
	 * the file is not a regular file, or
	 * the last attempt to allocate a block with reservations turned on
	 * failed.
	 */
	if (my_rsv == NULL) {
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
						grp_goal, count, NULL);
		goto out;
	}
	/*
	 * grp_goal is a group-relative block number (if there is a goal):
	 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
	 * group_first_block is a filesystem-wide block number and is the
	 * block number of the first block in this group.
	 */
	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	/*
	 * Basically we will allocate a new block from the inode's reservation
	 * window.
	 *
	 * We need to allocate a new reservation window if:
	 * a) the inode does not have a reservation window; or
	 * b) the last attempt to allocate a block from the existing
	 *    reservation failed; or
	 * c) we come here with a goal that is not inside the existing
	 *    reservation window.
	 *
	 * We do not need to allocate a new reservation window if we come here
	 * at the beginning with a goal and the goal is inside the window, or
	 * if we don't have a goal but already have a reservation window.  In
	 * those cases we can allocate from the reservation window directly.
	 */
	while (1) {
		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
			!goal_in_my_reservation(&my_rsv->rsv_window,
						grp_goal, group, sb)) {
			if (my_rsv->rsv_goal_size < *count)
				my_rsv->rsv_goal_size = *count;
			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
							group, bitmap_bh);
			if (ret < 0)
				break;			/* failed */

			if (!goal_in_my_reservation(&my_rsv->rsv_window,
							grp_goal, group, sb))
				grp_goal = -1;
		} else if (grp_goal >= 0) {
			int curr = my_rsv->rsv_end -
					(grp_goal + group_first_block) + 1;
			if (curr < *count)
				try_to_extend_reservation(my_rsv, sb,
							*count - curr);
		}

		if ((my_rsv->rsv_start > group_last_block) ||
		    (my_rsv->rsv_end < group_first_block)) {
			rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
			BUG();
		}
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
					   grp_goal, &num, &my_rsv->rsv_window);
		if (ret >= 0) {
			my_rsv->rsv_alloc_hit += num;
			*count = num;
			break;				/* succeed */
		}
		num = *count;
	}
out:
	if (ret >= 0) {
		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
					"bitmap block");
		fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
		if (fatal) {
			*errp = fatal;
			return -1;
		}
		return ret;
	}

	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
	ext4_journal_release_buffer(handle, bitmap_bh);
	return ret;
}
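
/*
 * Editor's sketch (not part of the original file): the calling convention of
 * ext4_try_to_allocate_with_rsv() in isolation.  The caller is assumed to
 * have read the group's block bitmap into "bitmap_bh" and to pass the inode's
 * reservation node (or NULL to bypass reservations), as ext4_new_blocks()
 * does below.  A return value >= 0 is a group-relative block number and
 * *count is updated to the number of blocks actually allocated; -1 with
 * *errp == 0 simply means "nothing found in this group", while a non-zero
 * *errp is a fatal journalling error.
 */
static ext4_grpblk_t __maybe_unused
example_alloc_in_group(struct super_block *sb, handle_t *handle,
			ext4_group_t group, struct buffer_head *bitmap_bh,
			struct ext4_reserve_window_node *my_rsv,
			unsigned long *count, int *errp)
{
	return ext4_try_to_allocate_with_rsv(sb, handle, group, bitmap_bh,
					-1 /* no goal within the group */,
					my_rsv, count, errp);
}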
/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 *
 * Check if filesystem has at least 1 free block available for allocation.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t free_blocks, root_blocks;

	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	root_blocks = ext4_r_blocks_count(sbi->s_es);
	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
		sbi->s_resuid != current->fsuid &&
		(sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid))) {
		return 0;
	}
	return 1;
}
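
/*
 * Editor's sketch (not part of the original file): the reserved-blocks test
 * above, reduced to plain numbers.  With ext4_r_blocks_count() == 1000, an
 * unprivileged caller that is not the reserved uid/gid only sees free space
 * while at least 1001 blocks are free; root, CAP_SYS_RESOURCE and the
 * reserved uid/gid may dig into the last 1000 blocks.
 */
static int __maybe_unused
example_can_allocate(ext4_fsblk_t free_blocks, ext4_fsblk_t root_blocks,
			int privileged)
{
	/* same comparison as ext4_has_free_blocks(), minus the sb plumbing */
	return !(free_blocks < root_blocks + 1 && !privileged);
}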
/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of allocation attempts made so far
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned.  If it
 * looks profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
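
/*
 * Editor's sketch (not part of the original file): the retry loop callers
 * typically wrap around an allocation that can hit a transient ENOSPC while
 * freed blocks are still tied up in the committing transaction.  "do_alloc"
 * is a hypothetical callback standing in for any path that ends up in
 * ext4_new_blocks().
 */
static int __maybe_unused
example_retry_alloc(struct super_block *sb, int (*do_alloc)(void))
{
	int err, retries = 0;

	do {
		err = do_alloc();
	} while (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries));

	return err;
}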
/**
 * ext4_new_blocks() -- core block(s) allocation function
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block (filesystem wide)
 * @count:		target number of blocks to allocate
 * @errp:		error code
 *
 * ext4_new_blocks() uses a goal block to assist allocation.  It first tries
 * to allocate block(s) from the block group that contains the goal block.
 * If that fails, it will try to allocate block(s) from other block groups
 * without any specific goal block.
 */
ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gdp_bh;
	ext4_group_t group_no;
	ext4_group_t goal_group;
	ext4_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
	ext4_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block */
	ext4_fsblk_t ret_block;		/* filesystem-wide allocated block */
	ext4_group_t bgi;		/* blockgroup iteration index */
	int fatal = 0, err;
	int performed_allocation = 0;
	ext4_grpblk_t free_blocks;	/* number of free blocks in a group */
	struct super_block *sb;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	struct ext4_reserve_window_node *my_rsv = NULL;
	struct ext4_block_alloc_info *block_i;
	unsigned short windowsz = 0;
	ext4_group_t ngroups;
	unsigned long num = *count;

	*errp = -ENOSPC;
	sb = inode->i_sb;
	if (!sb) {
		printk("ext4_new_block: nonexistent device");
		return 0;
	}

	/*
	 * Check quota for allocation of this block.
	 */
	if (DQUOT_ALLOC_BLOCK(inode, num)) {
		*errp = -EDQUOT;
		return 0;
	}

	sbi = EXT4_SB(sb);
	es = EXT4_SB(sb)->s_es;
	ext4_debug("goal=%llu.\n", goal);
	/*
	 * Allocate a block from the reservation only when
	 * the filesystem is mounted with reservations (the default,
	 * -o reservation), and
	 * it's a regular file, and
	 * the desired window size is greater than 0 (one can use the ioctl
	 * command EXT4_IOC_SETRSVSZ to set the window size to 0 to turn off
	 * reservation on that particular file).
	 */
	block_i = EXT4_I(inode)->i_block_alloc_info;
	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
		my_rsv = &block_i->rsv_window_node;

	if (!ext4_has_free_blocks(sbi)) {
		*errp = -ENOSPC;
		goto out;
	}

	/*
	 * First, test whether the goal block is free.
	 */
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);
	ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
	goal_group = group_no;
retry_alloc:
	gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
	if (!gdp)
		goto io_error;

	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
	/*
	 * if there are not enough free blocks to make a new reservation,
	 * turn off reservations for this allocation
	 */
	if (my_rsv && (free_blocks < windowsz)
		&& (rsv_is_empty(&my_rsv->rsv_window)))
		my_rsv = NULL;

	if (free_blocks > 0) {
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, grp_target_blk,
					my_rsv, &num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}
	ngroups = EXT4_SB(sb)->s_groups_count;
	smp_rmb();

	/*
	 * Now search the rest of the groups.  We assume that
	 * group_no and gdp correctly point to the last group visited.
	 */
	for (bgi = 0; bgi < ngroups; bgi++) {
		group_no++;
		if (group_no >= ngroups)
			group_no = 0;
		gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
		if (!gdp)
			goto io_error;
		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
		/*
		 * skip this group if the number of
		 * free blocks is less than half of the reservation
		 * window size.
		 */
		if (free_blocks <= (windowsz/2))
			continue;

		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		/*
		 * try to allocate block(s) from this group, without a goal(-1).
		 */
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, -1, my_rsv,
					&num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}
	/*
	 * We may end up with a bogus earlier ENOSPC error because the
	 * filesystem is "full" of reservations while there are in fact
	 * free blocks available on disk.  In this case, we just forget
	 * about the reservations and do the block allocation as if we
	 * had no reservations.
	 */
	if (my_rsv) {
		my_rsv = NULL;
		windowsz = 0;
		group_no = goal_group;
		goto retry_alloc;
	}
	/* No space left on the device */
	*errp = -ENOSPC;
	goto out;
allocated:

	ext4_debug("using block group %lu(%d)\n",
			group_no, le16_to_cpu(gdp->bg_free_blocks_count));

	BUFFER_TRACE(gdp_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, gdp_bh);
	if (fatal)
		goto out;

	ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);

	if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ret_block, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group) ||
	    in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group)) {
		ext4_error(sb, "ext4_new_block",
			   "Allocating block in system zone - "
			   "blocks from %llu, length %lu",
			   ret_block, num);
		goto out;
	}
	performed_allocation = 1;

#ifdef CONFIG_JBD2_DEBUG
	{
		struct buffer_head *debug_bh;

		/* Record bitmap buffer state in the newly allocated block */
		debug_bh = sb_find_get_block(sb, ret_block);
		if (debug_bh) {
			BUFFER_TRACE(debug_bh, "state when allocated");
			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
			brelse(debug_bh);
		}
	}
	jbd_lock_bh_state(bitmap_bh);
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
		int i;

		for (i = 0; i < num; i++) {
			if (ext4_test_bit(grp_alloc_blk+i,
					bh2jh(bitmap_bh)->b_committed_data)) {
				printk("%s: block was unexpectedly set in "
					"b_committed_data\n", __FUNCTION__);
			}
		}
	}
	ext4_debug("found bit %d\n", grp_alloc_blk);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	jbd_unlock_bh_state(bitmap_bh);
#endif
	if (ret_block + num - 1 >= ext4_blocks_count(es)) {
		ext4_error(sb, "ext4_new_block",
			   "block(%llu) >= blocks count(%llu) - "
			   "block_group = %lu, es == %p ", ret_block,
			   ext4_blocks_count(es), group_no, es);
		goto out;
	}

	/*
	 * It is up to the caller to add the new buffer to a journal
	 * list of some description.  We don't know in advance whether
	 * the caller wants to use it as metadata or data.
	 */
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
	gdp->bg_free_blocks_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	percpu_counter_sub(&sbi->s_freeblocks_counter, num);

	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
	err = ext4_journal_dirty_metadata(handle, gdp_bh);
	if (!fatal)
		fatal = err;

	sb->s_dirt = 1;
	if (fatal)
		goto out;

	*errp = 0;
	brelse(bitmap_bh);
	DQUOT_FREE_BLOCK(inode, *count-num);
	*count = num;
	return ret_block;

io_error:
	*errp = -EIO;
out:
	if (fatal) {
		*errp = fatal;
		ext4_std_error(sb, fatal);
	}
	/*
	 * Undo the block allocation
	 */
	if (!performed_allocation)
		DQUOT_FREE_BLOCK(inode, *count);
	brelse(bitmap_bh);
	return 0;
}
ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, int *errp)
{
	unsigned long count = 1;

	return ext4_new_blocks(handle, inode, goal, &count, errp);
}
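
/*
 * Editor's sketch (not part of the original file): typical use of
 * ext4_new_blocks().  "count" is both input (blocks wanted) and output
 * (blocks actually allocated, possibly fewer); on failure 0 is returned and
 * *errp carries the error (-ENOSPC, -EDQUOT, or a journalling error).
 */
static ext4_fsblk_t __maybe_unused
example_new_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, int *errp)
{
	unsigned long count = 8;	/* ask for up to 8 contiguous blocks */
	ext4_fsblk_t first;

	first = ext4_new_blocks(handle, inode, goal, &count, errp);
	/*
	 * On success, blocks first .. first + count - 1 now belong to the
	 * inode, where count may have been reduced from the request.
	 */
	return first;
}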
/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned long x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	smp_rmb();

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n",
		EXT4_FREE_BLOCKS_COUNT(es),
		desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
	}

	return desc_count;
#endif
}
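
/*
 * Editor's sketch (not part of the original file): ext4_count_free_blocks()
 * walks every group descriptor, so it is only suited to slow paths; the
 * cheap, slightly lazy alternative is the per-cpu counter used by
 * ext4_has_free_blocks() above.  A hedged comparison of the two:
 */
static void __maybe_unused
example_compare_free_counts(struct super_block *sb)
{
	ext4_fsblk_t exact = ext4_count_free_blocks(sb);
	ext4_fsblk_t cached = percpu_counter_read_positive(
				&EXT4_SB(sb)->s_freeblocks_counter);

	if (exact != cached)
		printk(KERN_DEBUG "free blocks: descriptors say %llu, "
			"counter says %llu\n", exact, cached);
}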
/* Return 1 if a is a positive integer power of b (i.e. b, b^2, b^3, ...). */
static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

/*
 * With the sparse_super feature, only group 0, group 1 and groups that are
 * powers of 3, 5 or 7 carry superblock/group-descriptor backups.
 */
static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}
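
/*
 * Editor's sketch (not part of the original file): with sparse_super, backup
 * superblocks end up only in group 0, group 1 and groups that are powers of
 * 3, 5 or 7 (3, 5, 7, 9, 25, 27, 49, ...).  A small loop over
 * ext4_bg_has_super() makes the pattern visible:
 */
static void __maybe_unused
example_list_backup_groups(struct super_block *sb, ext4_group_t ngroups)
{
	ext4_group_t i;

	for (i = 0; i < ngroups; i++)
		if (ext4_bg_has_super(sb, i))
			printk(KERN_DEBUG "group %lu holds a superblock copy\n",
				(unsigned long)i);
}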
/*
 * With the meta_bg layout, each "metagroup" of EXT4_DESC_PER_BLOCK(sb)
 * groups keeps its single descriptor block in the first group of the
 * metagroup, with backups in the second and the last group.
 */
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

/*
 * Without meta_bg, a group carries a full copy of the group descriptor
 * table whenever it carries a superblock copy.
 */
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return EXT4_SB(sb)->s_gdb_count;
}
/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}
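
/*
 * Editor's sketch (not part of the original file): how ext4_bg_has_super()
 * and ext4_bg_num_gdb() are typically combined, e.g. when estimating the
 * static metadata overhead of a group.  Reserved GDT blocks (resize_inode
 * feature) are deliberately left out of this simplified sketch.
 */
static unsigned long __maybe_unused
example_group_overhead(struct super_block *sb, ext4_group_t group)
{
	unsigned long overhead;

	/* superblock copy (0 or 1) plus its group descriptor blocks */
	overhead = ext4_bg_has_super(sb, group) + ext4_bg_num_gdb(sb, group);

	/* every group also owns a block bitmap, an inode bitmap and an
	 * inode table */
	overhead += 2 + EXT4_SB(sb)->s_itb_per_group;

	return overhead;
}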