balloc.c
  1. /*
  2. * linux/fs/ext4/balloc.c
  3. *
  4. * Copyright (C) 1992, 1993, 1994, 1995
  5. * Remy Card (card@masi.ibp.fr)
  6. * Laboratoire MASI - Institut Blaise Pascal
  7. * Universite Pierre et Marie Curie (Paris VI)
  8. *
  9. * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
  10. * Big-endian to little-endian byte-swapping/bitmaps by
  11. * David S. Miller (davem@caip.rutgers.edu), 1995
  12. */
  13. #include <linux/time.h>
  14. #include <linux/capability.h>
  15. #include <linux/fs.h>
  16. #include <linux/jbd2.h>
  17. #include <linux/quotaops.h>
  18. #include <linux/buffer_head.h>
  19. #include "ext4.h"
  20. #include "ext4_jbd2.h"
  21. #include "group.h"
  22. /*
  23. * balloc.c contains the blocks allocation and deallocation routines
  24. */
  25. /*
  26. * Calculate the block group number and offset, given a block number
  27. */
  28. void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
  29. ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
  30. {
  31. struct ext4_super_block *es = EXT4_SB(sb)->s_es;
  32. ext4_grpblk_t offset;
  33. blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
  34. offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
  35. if (offsetp)
  36. *offsetp = offset;
  37. if (blockgrpp)
  38. *blockgrpp = blocknr;
  39. }
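/*
 * Illustrative sketch: ext4_get_group_no_and_offset() is plain division on
 * the block number relative to s_first_data_block:
 *   group  = (blocknr - s_first_data_block) / EXT4_BLOCKS_PER_GROUP(sb)
 *   offset = (blocknr - s_first_data_block) % EXT4_BLOCKS_PER_GROUP(sb)
 * The standalone helper below mirrors that arithmetic with plain 64-bit
 * integers; the demo_* names are hypothetical and exist only for this sketch.
 */
static inline void demo_group_and_offset(unsigned long long blocknr,
					 unsigned long long first_data_block,
					 unsigned long blocks_per_group,
					 unsigned long *group,
					 unsigned long *offset)
{
	unsigned long long rel = blocknr - first_data_block;

	*group  = (unsigned long)(rel / blocks_per_group); /* block group number */
	*offset = (unsigned long)(rel % blocks_per_group); /* offset within the group */
}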
  40. static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
  41. ext4_group_t block_group)
  42. {
  43. ext4_group_t actual_group;
  44. ext4_get_group_no_and_offset(sb, block, &actual_group, 0);
  45. if (actual_group == block_group)
  46. return 1;
  47. return 0;
  48. }
  49. static int ext4_group_used_meta_blocks(struct super_block *sb,
  50. ext4_group_t block_group)
  51. {
  52. ext4_fsblk_t tmp;
  53. struct ext4_sb_info *sbi = EXT4_SB(sb);
  54. /* block bitmap, inode bitmap, and inode table blocks */
  55. int used_blocks = sbi->s_itb_per_group + 2;
  56. if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
  57. struct ext4_group_desc *gdp;
  58. struct buffer_head *bh;
  59. gdp = ext4_get_group_desc(sb, block_group, &bh);
  60. if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
  61. block_group))
  62. used_blocks--;
  63. if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
  64. block_group))
  65. used_blocks--;
  66. tmp = ext4_inode_table(sb, gdp);
  67. for (; tmp < ext4_inode_table(sb, gdp) +
  68. sbi->s_itb_per_group; tmp++) {
  69. if (!ext4_block_in_group(sb, tmp, block_group))
  70. used_blocks -= 1;
  71. }
  72. }
  73. return used_blocks;
  74. }
  75. /* Initializes an uninitialized block bitmap if given, and returns the
  76. * number of blocks free in the group. */
  77. unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
  78. ext4_group_t block_group, struct ext4_group_desc *gdp)
  79. {
  80. int bit, bit_max;
  81. unsigned free_blocks, group_blocks;
  82. struct ext4_sb_info *sbi = EXT4_SB(sb);
  83. if (bh) {
  84. J_ASSERT_BH(bh, buffer_locked(bh));
85. /* If the checksum is bad, mark all blocks used to prevent allocation,
86. * essentially implementing a per-group read-only flag. */
  87. if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
  88. ext4_error(sb, __func__,
  89. "Checksum bad for group %lu\n", block_group);
  90. gdp->bg_free_blocks_count = 0;
  91. gdp->bg_free_inodes_count = 0;
  92. gdp->bg_itable_unused = 0;
  93. memset(bh->b_data, 0xff, sb->s_blocksize);
  94. return 0;
  95. }
  96. memset(bh->b_data, 0, sb->s_blocksize);
  97. }
  98. /* Check for superblock and gdt backups in this group */
  99. bit_max = ext4_bg_has_super(sb, block_group);
  100. if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
  101. block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
  102. sbi->s_desc_per_block) {
  103. if (bit_max) {
  104. bit_max += ext4_bg_num_gdb(sb, block_group);
  105. bit_max +=
  106. le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
  107. }
  108. } else { /* For META_BG_BLOCK_GROUPS */
  109. int group_rel = (block_group -
  110. le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
  111. EXT4_DESC_PER_BLOCK(sb);
  112. if (group_rel == 0 || group_rel == 1 ||
  113. (group_rel == EXT4_DESC_PER_BLOCK(sb) - 1))
  114. bit_max += 1;
  115. }
  116. if (block_group == sbi->s_groups_count - 1) {
  117. /*
118. * Even though mke2fs always initializes the first and last groups,
119. * if some other tool enabled EXT4_BG_BLOCK_UNINIT we need
120. * to make sure we calculate the right number of free blocks
  121. */
  122. group_blocks = ext4_blocks_count(sbi->s_es) -
  123. le32_to_cpu(sbi->s_es->s_first_data_block) -
  124. (EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count -1));
  125. } else {
  126. group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
  127. }
  128. free_blocks = group_blocks - bit_max;
  129. if (bh) {
  130. ext4_fsblk_t start, tmp;
  131. int flex_bg = 0;
  132. for (bit = 0; bit < bit_max; bit++)
  133. ext4_set_bit(bit, bh->b_data);
  134. start = ext4_group_first_block_no(sb, block_group);
  135. if (EXT4_HAS_INCOMPAT_FEATURE(sb,
  136. EXT4_FEATURE_INCOMPAT_FLEX_BG))
  137. flex_bg = 1;
  138. /* Set bits for block and inode bitmaps, and inode table */
  139. tmp = ext4_block_bitmap(sb, gdp);
  140. if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
  141. ext4_set_bit(tmp - start, bh->b_data);
  142. tmp = ext4_inode_bitmap(sb, gdp);
  143. if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
  144. ext4_set_bit(tmp - start, bh->b_data);
  145. tmp = ext4_inode_table(sb, gdp);
  146. for (; tmp < ext4_inode_table(sb, gdp) +
  147. sbi->s_itb_per_group; tmp++) {
  148. if (!flex_bg ||
  149. ext4_block_in_group(sb, tmp, block_group))
  150. ext4_set_bit(tmp - start, bh->b_data);
  151. }
  152. /*
153. * Also, if the number of blocks within the group is
154. * less than blocksize * 8 (which is the size of the
155. * bitmap), set the rest of the block bitmap to 1
  156. */
  157. mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
  158. }
  159. return free_blocks - ext4_group_used_meta_blocks(sb, block_group);
  160. }
  161. /*
  162. * The free blocks are managed by bitmaps. A file system contains several
163. * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap
  164. * block for inodes, N blocks for the inode table and data blocks.
  165. *
  166. * The file system contains group descriptors which are located after the
  167. * super block. Each descriptor contains the number of the bitmap block and
  168. * the free blocks count in the block. The descriptors are loaded in memory
  169. * when a file system is mounted (see ext4_fill_super).
  170. */
  171. #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
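/*
 * Illustrative sketch: in_range(b, first, len) is an inclusive interval test,
 * true when b lies in [first, first + len - 1]. For example, with first = 100
 * and len = 8 it accepts blocks 100..107 and rejects 108. demo_in_range()
 * below spells out the same test; the demo_* name is hypothetical and exists
 * only for this sketch.
 */
static inline int demo_in_range(unsigned long long b,
				unsigned long long first,
				unsigned long len)
{
	return b >= first && b <= first + len - 1;
}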
  172. /**
  173. * ext4_get_group_desc() -- load group descriptor from disk
  174. * @sb: super block
  175. * @block_group: given block group
  176. * @bh: pointer to the buffer head to store the block
  177. * group descriptor
  178. */
  179. struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
  180. ext4_group_t block_group,
  181. struct buffer_head ** bh)
  182. {
  183. unsigned long group_desc;
  184. unsigned long offset;
  185. struct ext4_group_desc * desc;
  186. struct ext4_sb_info *sbi = EXT4_SB(sb);
  187. if (block_group >= sbi->s_groups_count) {
  188. ext4_error (sb, "ext4_get_group_desc",
  189. "block_group >= groups_count - "
  190. "block_group = %lu, groups_count = %lu",
  191. block_group, sbi->s_groups_count);
  192. return NULL;
  193. }
  194. smp_rmb();
  195. group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
  196. offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
  197. if (!sbi->s_group_desc[group_desc]) {
  198. ext4_error (sb, "ext4_get_group_desc",
  199. "Group descriptor not loaded - "
  200. "block_group = %lu, group_desc = %lu, desc = %lu",
  201. block_group, group_desc, offset);
  202. return NULL;
  203. }
  204. desc = (struct ext4_group_desc *)(
  205. (__u8 *)sbi->s_group_desc[group_desc]->b_data +
  206. offset * EXT4_DESC_SIZE(sb));
  207. if (bh)
  208. *bh = sbi->s_group_desc[group_desc];
  209. return desc;
  210. }
  211. static int ext4_valid_block_bitmap(struct super_block *sb,
  212. struct ext4_group_desc *desc,
  213. unsigned int block_group,
  214. struct buffer_head *bh)
  215. {
  216. ext4_grpblk_t offset;
  217. ext4_grpblk_t next_zero_bit;
  218. ext4_fsblk_t bitmap_blk;
  219. ext4_fsblk_t group_first_block;
  220. if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
221. /* With FLEX_BG, the inode/block bitmaps and itable
222. * blocks may not be in the group at all,
223. * so the bitmap validation is skipped for those groups;
224. * otherwise we would also have to read the block group where
225. * the bitmaps are located to verify that the bits are set.
  226. */
  227. return 1;
  228. }
  229. group_first_block = ext4_group_first_block_no(sb, block_group);
  230. /* check whether block bitmap block number is set */
  231. bitmap_blk = ext4_block_bitmap(sb, desc);
  232. offset = bitmap_blk - group_first_block;
  233. if (!ext4_test_bit(offset, bh->b_data))
  234. /* bad block bitmap */
  235. goto err_out;
  236. /* check whether the inode bitmap block number is set */
  237. bitmap_blk = ext4_inode_bitmap(sb, desc);
  238. offset = bitmap_blk - group_first_block;
  239. if (!ext4_test_bit(offset, bh->b_data))
  240. /* bad block bitmap */
  241. goto err_out;
  242. /* check whether the inode table block number is set */
  243. bitmap_blk = ext4_inode_table(sb, desc);
  244. offset = bitmap_blk - group_first_block;
  245. next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
  246. offset + EXT4_SB(sb)->s_itb_per_group,
  247. offset);
  248. if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
  249. /* good bitmap for inode tables */
  250. return 1;
  251. err_out:
  252. ext4_error(sb, __func__,
  253. "Invalid block bitmap - "
  254. "block_group = %d, block = %llu",
  255. block_group, bitmap_blk);
  256. return 0;
  257. }
  258. /**
  259. * read_block_bitmap()
  260. * @sb: super block
  261. * @block_group: given block group
  262. *
263. * Read the bitmap for a given block_group, and validate that the
  264. * bits for block/inode/inode tables are set in the bitmaps
  265. *
  266. * Return buffer_head on success or NULL in case of failure.
  267. */
  268. struct buffer_head *
  269. read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
  270. {
  271. struct ext4_group_desc * desc;
  272. struct buffer_head * bh = NULL;
  273. ext4_fsblk_t bitmap_blk;
  274. desc = ext4_get_group_desc(sb, block_group, NULL);
  275. if (!desc)
  276. return NULL;
  277. bitmap_blk = ext4_block_bitmap(sb, desc);
  278. bh = sb_getblk(sb, bitmap_blk);
  279. if (unlikely(!bh)) {
  280. ext4_error(sb, __func__,
  281. "Cannot read block bitmap - "
  282. "block_group = %d, block_bitmap = %llu",
  283. (int)block_group, (unsigned long long)bitmap_blk);
  284. return NULL;
  285. }
  286. if (bh_uptodate_or_lock(bh))
  287. return bh;
  288. if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
  289. ext4_init_block_bitmap(sb, bh, block_group, desc);
  290. set_buffer_uptodate(bh);
  291. unlock_buffer(bh);
  292. return bh;
  293. }
  294. if (bh_submit_read(bh) < 0) {
  295. put_bh(bh);
  296. ext4_error(sb, __func__,
  297. "Cannot read block bitmap - "
  298. "block_group = %d, block_bitmap = %llu",
  299. (int)block_group, (unsigned long long)bitmap_blk);
  300. return NULL;
  301. }
  302. ext4_valid_block_bitmap(sb, desc, block_group, bh);
  303. /*
304. * the file system was mounted not to panic on error;
305. * continue with the corrupt bitmap
  306. */
  307. return bh;
  308. }
  309. /*
  310. * The reservation window structure operations
  311. * --------------------------------------------
  312. * Operations include:
  313. * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
  314. *
  315. * We use a red-black tree to represent per-filesystem reservation
  316. * windows.
  317. *
  318. */
  319. /**
  320. * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
  321. * @rb_root: root of per-filesystem reservation rb tree
  322. * @verbose: verbose mode
  323. * @fn: function which wishes to dump the reservation map
  324. *
  325. * If verbose is turned on, it will print the whole block reservation
326. * windows (start, end). Otherwise, it will only print out the "bad" windows,
  327. * those windows that overlap with their immediate neighbors.
  328. */
  329. #if 1
  330. static void __rsv_window_dump(struct rb_root *root, int verbose,
  331. const char *fn)
  332. {
  333. struct rb_node *n;
  334. struct ext4_reserve_window_node *rsv, *prev;
  335. int bad;
  336. restart:
  337. n = rb_first(root);
  338. bad = 0;
  339. prev = NULL;
  340. printk("Block Allocation Reservation Windows Map (%s):\n", fn);
  341. while (n) {
  342. rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
  343. if (verbose)
  344. printk("reservation window 0x%p "
  345. "start: %llu, end: %llu\n",
  346. rsv, rsv->rsv_start, rsv->rsv_end);
  347. if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
  348. printk("Bad reservation %p (start >= end)\n",
  349. rsv);
  350. bad = 1;
  351. }
  352. if (prev && prev->rsv_end >= rsv->rsv_start) {
  353. printk("Bad reservation %p (prev->end >= start)\n",
  354. rsv);
  355. bad = 1;
  356. }
  357. if (bad) {
  358. if (!verbose) {
  359. printk("Restarting reservation walk in verbose mode\n");
  360. verbose = 1;
  361. goto restart;
  362. }
  363. }
  364. n = rb_next(n);
  365. prev = rsv;
  366. }
  367. printk("Window map complete.\n");
  368. BUG_ON(bad);
  369. }
  370. #define rsv_window_dump(root, verbose) \
  371. __rsv_window_dump((root), (verbose), __func__)
  372. #else
  373. #define rsv_window_dump(root, verbose) do {} while (0)
  374. #endif
  375. /**
  376. * goal_in_my_reservation()
  377. * @rsv: inode's reservation window
  378. * @grp_goal: given goal block relative to the allocation block group
  379. * @group: the current allocation block group
  380. * @sb: filesystem super block
  381. *
  382. * Test if the given goal block (group relative) is within the file's
  383. * own block reservation window range.
  384. *
  385. * If the reservation window is outside the goal allocation group, return 0;
  386. * grp_goal (given goal block) could be -1, which means no specific
  387. * goal block. In this case, always return 1.
  388. * If the goal block is within the reservation window, return 1;
  389. * otherwise, return 0;
  390. */
  391. static int
  392. goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
  393. ext4_group_t group, struct super_block *sb)
  394. {
  395. ext4_fsblk_t group_first_block, group_last_block;
  396. group_first_block = ext4_group_first_block_no(sb, group);
  397. group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
  398. if ((rsv->_rsv_start > group_last_block) ||
  399. (rsv->_rsv_end < group_first_block))
  400. return 0;
  401. if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
  402. || (grp_goal + group_first_block > rsv->_rsv_end)))
  403. return 0;
  404. return 1;
  405. }
  406. /**
  407. * search_reserve_window()
  408. * @rb_root: root of reservation tree
  409. * @goal: target allocation block
  410. *
  411. * Find the reserved window which includes the goal, or the previous one
  412. * if the goal is not in any window.
  413. * Returns NULL if there are no windows or if all windows start after the goal.
  414. */
  415. static struct ext4_reserve_window_node *
  416. search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
  417. {
  418. struct rb_node *n = root->rb_node;
  419. struct ext4_reserve_window_node *rsv;
  420. if (!n)
  421. return NULL;
  422. do {
  423. rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
  424. if (goal < rsv->rsv_start)
  425. n = n->rb_left;
  426. else if (goal > rsv->rsv_end)
  427. n = n->rb_right;
  428. else
  429. return rsv;
  430. } while (n);
  431. /*
  432. * We've fallen off the end of the tree: the goal wasn't inside
  433. * any particular node. OK, the previous node must be to one
  434. * side of the interval containing the goal. If it's the RHS,
  435. * we need to back up one.
  436. */
  437. if (rsv->rsv_start > goal) {
  438. n = rb_prev(&rsv->rsv_node);
  439. rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
  440. }
  441. return rsv;
  442. }
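/*
 * Illustrative sketch: search_reserve_window() returns the window that
 * contains the goal, or else the closest window that starts before the goal
 * (NULL when every window starts after it). The helper below performs the
 * same "containing or predecessor" lookup over a plain array sorted by start
 * block; the demo_* names are hypothetical and exist only for this sketch.
 */
struct demo_window {
	unsigned long long start;
	unsigned long long end;
};

static inline int demo_search_window(const struct demo_window *w, int nr,
				     unsigned long long goal)
{
	int i, prev = -1;

	for (i = 0; i < nr; i++) {
		if (w[i].start > goal)
			break;		/* all later windows start after the goal */
		if (goal <= w[i].end)
			return i;	/* goal lies inside this window */
		prev = i;		/* remember the predecessor */
	}
	return prev;			/* predecessor index, or -1 if none */
}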
  443. /**
  444. * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
  445. * @sb: super block
  446. * @rsv: reservation window to add
  447. *
448. * Must be called with rsv_lock held.
  449. */
  450. void ext4_rsv_window_add(struct super_block *sb,
  451. struct ext4_reserve_window_node *rsv)
  452. {
  453. struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
  454. struct rb_node *node = &rsv->rsv_node;
  455. ext4_fsblk_t start = rsv->rsv_start;
  456. struct rb_node ** p = &root->rb_node;
  457. struct rb_node * parent = NULL;
  458. struct ext4_reserve_window_node *this;
  459. while (*p)
  460. {
  461. parent = *p;
  462. this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);
  463. if (start < this->rsv_start)
  464. p = &(*p)->rb_left;
  465. else if (start > this->rsv_end)
  466. p = &(*p)->rb_right;
  467. else {
  468. rsv_window_dump(root, 1);
  469. BUG();
  470. }
  471. }
  472. rb_link_node(node, parent, p);
  473. rb_insert_color(node, root);
  474. }
  475. /**
  476. * ext4_rsv_window_remove() -- unlink a window from the reservation rb tree
  477. * @sb: super block
  478. * @rsv: reservation window to remove
  479. *
  480. * Mark the block reservation window as not allocated, and unlink it
  481. * from the filesystem reservation window rb tree. Must be called with
482. * rsv_lock held.
  483. */
  484. static void rsv_window_remove(struct super_block *sb,
  485. struct ext4_reserve_window_node *rsv)
  486. {
  487. rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
  488. rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
  489. rsv->rsv_alloc_hit = 0;
  490. rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
  491. }
  492. /*
  493. * rsv_is_empty() -- Check if the reservation window is allocated.
  494. * @rsv: given reservation window to check
  495. *
  496. * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
  497. */
  498. static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
  499. {
500. /* a valid reservation end block cannot be 0 */
  501. return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
  502. }
  503. /**
  504. * ext4_init_block_alloc_info()
  505. * @inode: file inode structure
  506. *
  507. * Allocate and initialize the reservation window structure, and
508. * finally link the window to the ext4 inode structure
509. *
510. * The reservation window structure is only dynamically allocated
511. * and linked to the ext4 inode the first time the open file
512. * needs a new block. So, before every ext4_new_block(s) call, for
513. * regular files, we should check whether the reservation window
514. * structure exists; if it does not, this function is called.
515. * Failing to do so will result in block reservation being turned off for
516. * that open file.
  517. *
  518. * This function is called from ext4_get_blocks_handle(), also called
  519. * when setting the reservation window size through ioctl before the file
  520. * is open for write (needs block allocation).
  521. *
522. * Needs down_write(i_data_sem) protection prior to calling this function.
  523. */
  524. void ext4_init_block_alloc_info(struct inode *inode)
  525. {
  526. struct ext4_inode_info *ei = EXT4_I(inode);
  527. struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
  528. struct super_block *sb = inode->i_sb;
  529. block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
  530. if (block_i) {
  531. struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;
  532. rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
  533. rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
  534. /*
535. * if the filesystem is mounted with NORESERVATION, the goal
  536. * reservation window size is set to zero to indicate
  537. * block reservation is off
  538. */
  539. if (!test_opt(sb, RESERVATION))
  540. rsv->rsv_goal_size = 0;
  541. else
  542. rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
  543. rsv->rsv_alloc_hit = 0;
  544. block_i->last_alloc_logical_block = 0;
  545. block_i->last_alloc_physical_block = 0;
  546. }
  547. ei->i_block_alloc_info = block_i;
  548. }
  549. /**
  550. * ext4_discard_reservation()
  551. * @inode: inode
  552. *
553. * Discard (free) the block reservation window on the last file close,
554. * on truncate, or at the last iput().
555. *
556. * It is called in three cases:
557. * ext4_release_file(): the last writer closes the file
558. * ext4_clear_inode(): the last iput(), when nobody links to this file.
  559. * ext4_truncate(): when the block indirect map is about to change.
  560. *
  561. */
  562. void ext4_discard_reservation(struct inode *inode)
  563. {
  564. struct ext4_inode_info *ei = EXT4_I(inode);
  565. struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
  566. struct ext4_reserve_window_node *rsv;
  567. spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;
  568. ext4_mb_discard_inode_preallocations(inode);
  569. if (!block_i)
  570. return;
  571. rsv = &block_i->rsv_window_node;
  572. if (!rsv_is_empty(&rsv->rsv_window)) {
  573. spin_lock(rsv_lock);
  574. if (!rsv_is_empty(&rsv->rsv_window))
  575. rsv_window_remove(inode->i_sb, rsv);
  576. spin_unlock(rsv_lock);
  577. }
  578. }
  579. /**
  580. * ext4_free_blocks_sb() -- Free given blocks and update quota
  581. * @handle: handle to this transaction
  582. * @sb: super block
583. * @block: start physical block to free
  584. * @count: number of blocks to free
  585. * @pdquot_freed_blocks: pointer to quota
  586. */
  587. void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
  588. ext4_fsblk_t block, unsigned long count,
  589. unsigned long *pdquot_freed_blocks)
  590. {
  591. struct buffer_head *bitmap_bh = NULL;
  592. struct buffer_head *gd_bh;
  593. ext4_group_t block_group;
  594. ext4_grpblk_t bit;
  595. unsigned long i;
  596. unsigned long overflow;
  597. struct ext4_group_desc * desc;
  598. struct ext4_super_block * es;
  599. struct ext4_sb_info *sbi;
  600. int err = 0, ret;
  601. ext4_grpblk_t group_freed;
  602. *pdquot_freed_blocks = 0;
  603. sbi = EXT4_SB(sb);
  604. es = sbi->s_es;
  605. if (block < le32_to_cpu(es->s_first_data_block) ||
  606. block + count < block ||
  607. block + count > ext4_blocks_count(es)) {
  608. ext4_error (sb, "ext4_free_blocks",
  609. "Freeing blocks not in datazone - "
  610. "block = %llu, count = %lu", block, count);
  611. goto error_return;
  612. }
  613. ext4_debug ("freeing block(s) %llu-%llu\n", block, block + count - 1);
  614. do_more:
  615. overflow = 0;
  616. ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
  617. /*
  618. * Check to see if we are freeing blocks across a group
  619. * boundary.
  620. */
  621. if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
  622. overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
  623. count -= overflow;
  624. }
  625. brelse(bitmap_bh);
  626. bitmap_bh = read_block_bitmap(sb, block_group);
  627. if (!bitmap_bh)
  628. goto error_return;
  629. desc = ext4_get_group_desc (sb, block_group, &gd_bh);
  630. if (!desc)
  631. goto error_return;
  632. if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
  633. in_range(ext4_inode_bitmap(sb, desc), block, count) ||
  634. in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
  635. in_range(block + count - 1, ext4_inode_table(sb, desc),
  636. sbi->s_itb_per_group)) {
  637. ext4_error (sb, "ext4_free_blocks",
  638. "Freeing blocks in system zones - "
  639. "Block = %llu, count = %lu",
  640. block, count);
  641. goto error_return;
  642. }
  643. /*
  644. * We are about to start releasing blocks in the bitmap,
  645. * so we need undo access.
  646. */
  647. /* @@@ check errors */
  648. BUFFER_TRACE(bitmap_bh, "getting undo access");
  649. err = ext4_journal_get_undo_access(handle, bitmap_bh);
  650. if (err)
  651. goto error_return;
  652. /*
  653. * We are about to modify some metadata. Call the journal APIs
  654. * to unshare ->b_data if a currently-committing transaction is
  655. * using it
  656. */
  657. BUFFER_TRACE(gd_bh, "get_write_access");
  658. err = ext4_journal_get_write_access(handle, gd_bh);
  659. if (err)
  660. goto error_return;
  661. jbd_lock_bh_state(bitmap_bh);
  662. for (i = 0, group_freed = 0; i < count; i++) {
  663. /*
  664. * An HJ special. This is expensive...
  665. */
  666. #ifdef CONFIG_JBD2_DEBUG
  667. jbd_unlock_bh_state(bitmap_bh);
  668. {
  669. struct buffer_head *debug_bh;
  670. debug_bh = sb_find_get_block(sb, block + i);
  671. if (debug_bh) {
  672. BUFFER_TRACE(debug_bh, "Deleted!");
  673. if (!bh2jh(bitmap_bh)->b_committed_data)
  674. BUFFER_TRACE(debug_bh,
  675. "No commited data in bitmap");
  676. BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
  677. __brelse(debug_bh);
  678. }
  679. }
  680. jbd_lock_bh_state(bitmap_bh);
  681. #endif
  682. if (need_resched()) {
  683. jbd_unlock_bh_state(bitmap_bh);
  684. cond_resched();
  685. jbd_lock_bh_state(bitmap_bh);
  686. }
  687. /* @@@ This prevents newly-allocated data from being
  688. * freed and then reallocated within the same
  689. * transaction.
  690. *
  691. * Ideally we would want to allow that to happen, but to
  692. * do so requires making jbd2_journal_forget() capable of
  693. * revoking the queued write of a data block, which
  694. * implies blocking on the journal lock. *forget()
  695. * cannot block due to truncate races.
  696. *
  697. * Eventually we can fix this by making jbd2_journal_forget()
  698. * return a status indicating whether or not it was able
  699. * to revoke the buffer. On successful revoke, it is
  700. * safe not to set the allocation bit in the committed
  701. * bitmap, because we know that there is no outstanding
  702. * activity on the buffer any more and so it is safe to
  703. * reallocate it.
  704. */
  705. BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
  706. J_ASSERT_BH(bitmap_bh,
  707. bh2jh(bitmap_bh)->b_committed_data != NULL);
  708. ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
  709. bh2jh(bitmap_bh)->b_committed_data);
  710. /*
  711. * We clear the bit in the bitmap after setting the committed
  712. * data bit, because this is the reverse order to that which
  713. * the allocator uses.
  714. */
  715. BUFFER_TRACE(bitmap_bh, "clear bit");
  716. if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
  717. bit + i, bitmap_bh->b_data)) {
  718. jbd_unlock_bh_state(bitmap_bh);
  719. ext4_error(sb, __func__,
  720. "bit already cleared for block %llu",
  721. (ext4_fsblk_t)(block + i));
  722. jbd_lock_bh_state(bitmap_bh);
  723. BUFFER_TRACE(bitmap_bh, "bit already cleared");
  724. } else {
  725. group_freed++;
  726. }
  727. }
  728. jbd_unlock_bh_state(bitmap_bh);
  729. spin_lock(sb_bgl_lock(sbi, block_group));
  730. le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
  731. desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
  732. spin_unlock(sb_bgl_lock(sbi, block_group));
  733. percpu_counter_add(&sbi->s_freeblocks_counter, count);
  734. /* We dirtied the bitmap block */
  735. BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
  736. err = ext4_journal_dirty_metadata(handle, bitmap_bh);
  737. /* And the group descriptor block */
  738. BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
  739. ret = ext4_journal_dirty_metadata(handle, gd_bh);
  740. if (!err) err = ret;
  741. *pdquot_freed_blocks += group_freed;
  742. if (overflow && !err) {
  743. block += count;
  744. count = overflow;
  745. goto do_more;
  746. }
  747. sb->s_dirt = 1;
  748. error_return:
  749. brelse(bitmap_bh);
  750. ext4_std_error(sb, err);
  751. return;
  752. }
  753. /**
  754. * ext4_free_blocks() -- Free given blocks and update quota
  755. * @handle: handle for this transaction
  756. * @inode: inode
  757. * @block: start physical block to free
758. * @count: number of blocks to free
  759. * @metadata: Are these metadata blocks
  760. */
  761. void ext4_free_blocks(handle_t *handle, struct inode *inode,
  762. ext4_fsblk_t block, unsigned long count,
  763. int metadata)
  764. {
  765. struct super_block * sb;
  766. unsigned long dquot_freed_blocks;
767. /* this isn't the right place to decide whether a block is metadata;
  768. * inode.c/extents.c knows better, but for safety ... */
  769. if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
  770. ext4_should_journal_data(inode))
  771. metadata = 1;
  772. sb = inode->i_sb;
  773. if (!test_opt(sb, MBALLOC) || !EXT4_SB(sb)->s_group_info)
  774. ext4_free_blocks_sb(handle, sb, block, count,
  775. &dquot_freed_blocks);
  776. else
  777. ext4_mb_free_blocks(handle, inode, block, count,
  778. metadata, &dquot_freed_blocks);
  779. if (dquot_freed_blocks)
  780. DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
  781. return;
  782. }
  783. /**
  784. * ext4_test_allocatable()
785. * @nr: given block number (group relative) to test
786. * @bh: bufferhead containing the bitmap of the given block group
  787. *
  788. * For ext4 allocations, we must not reuse any blocks which are
  789. * allocated in the bitmap buffer's "last committed data" copy. This
  790. * prevents deletes from freeing up the page for reuse until we have
  791. * committed the delete transaction.
  792. *
  793. * If we didn't do this, then deleting something and reallocating it as
  794. * data would allow the old block to be overwritten before the
  795. * transaction committed (because we force data to disk before commit).
  796. * This would lead to corruption if we crashed between overwriting the
  797. * data and committing the delete.
  798. *
  799. * @@@ We may want to make this allocation behaviour conditional on
  800. * data-writes at some point, and disable it for metadata allocations or
  801. * sync-data inodes.
  802. */
  803. static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
  804. {
  805. int ret;
  806. struct journal_head *jh = bh2jh(bh);
  807. if (ext4_test_bit(nr, bh->b_data))
  808. return 0;
  809. jbd_lock_bh_state(bh);
  810. if (!jh->b_committed_data)
  811. ret = 1;
  812. else
  813. ret = !ext4_test_bit(nr, jh->b_committed_data);
  814. jbd_unlock_bh_state(bh);
  815. return ret;
  816. }
  817. /**
  818. * bitmap_search_next_usable_block()
  819. * @start: the starting block (group relative) of the search
  820. * @bh: bufferhead contains the block group bitmap
  821. * @maxblocks: the ending block (group relative) of the reservation
  822. *
  823. * The bitmap search --- search forward alternately through the actual
  824. * bitmap on disk and the last-committed copy in journal, until we find a
  825. * bit free in both bitmaps.
  826. */
  827. static ext4_grpblk_t
  828. bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
  829. ext4_grpblk_t maxblocks)
  830. {
  831. ext4_grpblk_t next;
  832. struct journal_head *jh = bh2jh(bh);
  833. while (start < maxblocks) {
  834. next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
  835. if (next >= maxblocks)
  836. return -1;
  837. if (ext4_test_allocatable(next, bh))
  838. return next;
  839. jbd_lock_bh_state(bh);
  840. if (jh->b_committed_data)
  841. start = ext4_find_next_zero_bit(jh->b_committed_data,
  842. maxblocks, next);
  843. jbd_unlock_bh_state(bh);
  844. }
  845. return -1;
  846. }
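/*
 * Illustrative sketch: the search above keeps going until it finds a bit that
 * is clear both in the on-disk bitmap and in the journal's last-committed
 * copy (when one exists). The helper below expresses the same "free in both
 * bitmaps" condition over two byte-per-bit arrays; the demo_* names are
 * hypothetical and exist only for this sketch.
 */
static inline int demo_first_usable_bit(const unsigned char *live,
					const unsigned char *committed,
					int start, int maxblocks)
{
	int i;

	for (i = start; i < maxblocks; i++) {
		/* usable only if free on disk and free in the committed copy */
		if (!live[i] && (!committed || !committed[i]))
			return i;
	}
	return -1;	/* nothing usable before maxblocks */
}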
  847. /**
  848. * find_next_usable_block()
  849. * @start: the starting block (group relative) to find next
  850. * allocatable block in bitmap.
  851. * @bh: bufferhead contains the block group bitmap
  852. * @maxblocks: the ending block (group relative) for the search
  853. *
  854. * Find an allocatable block in a bitmap. We honor both the bitmap and
  855. * its last-committed copy (if that exists), and perform the "most
  856. * appropriate allocation" algorithm of looking for a free block near
  857. * the initial goal; then for a free byte somewhere in the bitmap; then
  858. * for any free bit in the bitmap.
  859. */
  860. static ext4_grpblk_t
  861. find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
  862. ext4_grpblk_t maxblocks)
  863. {
  864. ext4_grpblk_t here, next;
  865. char *p, *r;
  866. if (start > 0) {
  867. /*
  868. * The goal was occupied; search forward for a free
  869. * block within the next XX blocks.
  870. *
  871. * end_goal is more or less random, but it has to be
  872. * less than EXT4_BLOCKS_PER_GROUP. Aligning up to the
  873. * next 64-bit boundary is simple..
  874. */
  875. ext4_grpblk_t end_goal = (start + 63) & ~63;
  876. if (end_goal > maxblocks)
  877. end_goal = maxblocks;
  878. here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
  879. if (here < end_goal && ext4_test_allocatable(here, bh))
  880. return here;
  881. ext4_debug("Bit not found near goal\n");
  882. }
  883. here = start;
  884. if (here < 0)
  885. here = 0;
  886. p = ((char *)bh->b_data) + (here >> 3);
  887. r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
  888. next = (r - ((char *)bh->b_data)) << 3;
  889. if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
  890. return next;
  891. /*
  892. * The bitmap search --- search forward alternately through the actual
  893. * bitmap and the last-committed copy until we find a bit free in
  894. * both
  895. */
  896. here = bitmap_search_next_usable_block(here, bh, maxblocks);
  897. return here;
  898. }
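/*
 * Illustrative sketch: the "near the goal" phase above only scans up to the
 * next 64-bit boundary, end_goal = (start + 63) & ~63. For start = 70 this
 * gives end_goal = 128, so at most 58 bits are examined before the search
 * falls back to the byte-wise memscan() and then the full two-bitmap walk.
 */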
  899. /**
  900. * claim_block()
  901. * @block: the free block (group relative) to allocate
902. * @bh: the bufferhead containing the block group bitmap
  903. *
  904. * We think we can allocate this block in this bitmap. Try to set the bit.
  905. * If that succeeds then check that nobody has allocated and then freed the
906. * block since we saw that it was not marked in b_committed_data. If it _was_
  907. * allocated and freed then clear the bit in the bitmap again and return
  908. * zero (failure).
  909. */
  910. static inline int
  911. claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
  912. {
  913. struct journal_head *jh = bh2jh(bh);
  914. int ret;
  915. if (ext4_set_bit_atomic(lock, block, bh->b_data))
  916. return 0;
  917. jbd_lock_bh_state(bh);
  918. if (jh->b_committed_data && ext4_test_bit(block,jh->b_committed_data)) {
  919. ext4_clear_bit_atomic(lock, block, bh->b_data);
  920. ret = 0;
  921. } else {
  922. ret = 1;
  923. }
  924. jbd_unlock_bh_state(bh);
  925. return ret;
  926. }
  927. /**
  928. * ext4_try_to_allocate()
  929. * @sb: superblock
  930. * @handle: handle to this transaction
  931. * @group: given allocation block group
  932. * @bitmap_bh: bufferhead holds the block bitmap
  933. * @grp_goal: given target block within the group
  934. * @count: target number of blocks to allocate
  935. * @my_rsv: reservation window
  936. *
937. * Attempt to allocate blocks within a given range. Set the range of allocation
  938. * first, then find the first free bit(s) from the bitmap (within the range),
939. * and finally, allocate the blocks by claiming the found free bits as allocated.
  940. *
  941. * To set the range of this allocation:
  942. * if there is a reservation window, only try to allocate block(s) from the
  943. * file's own reservation window;
944. * Otherwise, the allocation range starts from the given goal block and ends at
  945. * the block group's last block.
  946. *
  947. * If we failed to allocate the desired block then we may end up crossing to a
  948. * new bitmap. In that case we must release write access to the old one via
  949. * ext4_journal_release_buffer(), else we'll run out of credits.
  950. */
  951. static ext4_grpblk_t
  952. ext4_try_to_allocate(struct super_block *sb, handle_t *handle,
  953. ext4_group_t group, struct buffer_head *bitmap_bh,
  954. ext4_grpblk_t grp_goal, unsigned long *count,
  955. struct ext4_reserve_window *my_rsv)
  956. {
  957. ext4_fsblk_t group_first_block;
  958. ext4_grpblk_t start, end;
  959. unsigned long num = 0;
  960. /* we do allocation within the reservation window if we have a window */
  961. if (my_rsv) {
  962. group_first_block = ext4_group_first_block_no(sb, group);
  963. if (my_rsv->_rsv_start >= group_first_block)
  964. start = my_rsv->_rsv_start - group_first_block;
  965. else
966. /* reservation window crosses group boundary */
  967. start = 0;
  968. end = my_rsv->_rsv_end - group_first_block + 1;
  969. if (end > EXT4_BLOCKS_PER_GROUP(sb))
  970. /* reservation window crosses group boundary */
  971. end = EXT4_BLOCKS_PER_GROUP(sb);
  972. if ((start <= grp_goal) && (grp_goal < end))
  973. start = grp_goal;
  974. else
  975. grp_goal = -1;
  976. } else {
  977. if (grp_goal > 0)
  978. start = grp_goal;
  979. else
  980. start = 0;
  981. end = EXT4_BLOCKS_PER_GROUP(sb);
  982. }
  983. BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));
  984. repeat:
  985. if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
  986. grp_goal = find_next_usable_block(start, bitmap_bh, end);
  987. if (grp_goal < 0)
  988. goto fail_access;
  989. if (!my_rsv) {
  990. int i;
  991. for (i = 0; i < 7 && grp_goal > start &&
  992. ext4_test_allocatable(grp_goal - 1,
  993. bitmap_bh);
  994. i++, grp_goal--)
  995. ;
  996. }
  997. }
  998. start = grp_goal;
  999. if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
  1000. grp_goal, bitmap_bh)) {
  1001. /*
  1002. * The block was allocated by another thread, or it was
  1003. * allocated and then freed by another thread
  1004. */
  1005. start++;
  1006. grp_goal++;
  1007. if (start >= end)
  1008. goto fail_access;
  1009. goto repeat;
  1010. }
  1011. num++;
  1012. grp_goal++;
  1013. while (num < *count && grp_goal < end
  1014. && ext4_test_allocatable(grp_goal, bitmap_bh)
  1015. && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
  1016. grp_goal, bitmap_bh)) {
  1017. num++;
  1018. grp_goal++;
  1019. }
  1020. *count = num;
  1021. return grp_goal - num;
  1022. fail_access:
  1023. *count = num;
  1024. return -1;
  1025. }
  1026. /**
  1027. * find_next_reservable_window():
  1028. * find a reservable space within the given range.
  1029. * It does not allocate the reservation window for now:
  1030. * alloc_new_reservation() will do the work later.
  1031. *
  1032. * @search_head: the head of the searching list;
  1033. * This is not necessarily the list head of the whole filesystem
  1034. *
  1035. * We have both head and start_block to assist the search
  1036. * for the reservable space. The list starts from head,
  1037. * but we will shift to the place where start_block is,
  1038. * then start from there, when looking for a reservable space.
  1039. *
  1040. * @size: the target new reservation window size
  1041. *
  1042. * @group_first_block: the first block we consider to start
  1043. * the real search from
  1044. *
  1045. * @last_block:
  1046. * the maximum block number that our goal reservable space
  1047. * could start from. This is normally the last block in this
1048. * group. The search ends when we find that the start of the next
1049. * possible reservable space is beyond this boundary.
1050. * This handles cross-boundary reservation window
1051. * requests.
  1052. *
1053. * Basically we search the given range (start_block, last_block),
1054. * rather than the whole reservation rb tree,
1055. * to find a free region that is of the requested size and has not
1056. * been reserved.
  1057. *
  1058. */
  1059. static int find_next_reservable_window(
  1060. struct ext4_reserve_window_node *search_head,
  1061. struct ext4_reserve_window_node *my_rsv,
  1062. struct super_block * sb,
  1063. ext4_fsblk_t start_block,
  1064. ext4_fsblk_t last_block)
  1065. {
  1066. struct rb_node *next;
  1067. struct ext4_reserve_window_node *rsv, *prev;
  1068. ext4_fsblk_t cur;
  1069. int size = my_rsv->rsv_goal_size;
  1070. /* TODO: make the start of the reservation window byte-aligned */
  1071. /* cur = *start_block & ~7;*/
  1072. cur = start_block;
  1073. rsv = search_head;
  1074. if (!rsv)
  1075. return -1;
  1076. while (1) {
  1077. if (cur <= rsv->rsv_end)
  1078. cur = rsv->rsv_end + 1;
  1079. /* TODO?
  1080. * in the case we could not find a reservable space
1081. * of the expected size, during the re-search we could
1082. * remember the largest reservable space we have seen
1083. * and return that one.
  1084. *
  1085. * For now it will fail if we could not find the reservable
  1086. * space with expected-size (or more)...
  1087. */
  1088. if (cur > last_block)
  1089. return -1; /* fail */
  1090. prev = rsv;
  1091. next = rb_next(&rsv->rsv_node);
1092. rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);
  1093. /*
  1094. * Reached the last reservation, we can just append to the
  1095. * previous one.
  1096. */
  1097. if (!next)
  1098. break;
  1099. if (cur + size <= rsv->rsv_start) {
  1100. /*
1101. * Found a reservable space big enough. We could
  1102. * have a reservation across the group boundary here
  1103. */
  1104. break;
  1105. }
  1106. }
  1107. /*
1108. * We come here either:
1109. * when we reach the end of the whole list and there is empty
1110. * reservable space after the last entry in the list, in which
1111. * case we append it to the end of the list;
1112. *
1113. * or when we find a reservable space in the middle of the list,
1114. * in which case we return the reservation window that we could
1115. * append to. Either way, we succeed.
  1116. */
  1117. if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
  1118. rsv_window_remove(sb, my_rsv);
  1119. /*
1120. * Let's book the whole available window for now. We will check the
1121. * disk bitmap later and then, if there are free blocks, we adjust
1122. * the window size if it's larger than requested.
1123. * Otherwise, we will remove this node from the tree the next time
1124. * find_next_reservable_window() is called.
  1125. */
  1126. my_rsv->rsv_start = cur;
  1127. my_rsv->rsv_end = cur + size - 1;
  1128. my_rsv->rsv_alloc_hit = 0;
  1129. if (prev != my_rsv)
  1130. ext4_rsv_window_add(sb, my_rsv);
  1131. return 0;
  1132. }
  1133. /**
  1134. * alloc_new_reservation()--allocate a new reservation window
  1135. *
  1136. * To make a new reservation, we search part of the filesystem
1137. * reservation list (the part of the list inside the group). We try to
  1138. * allocate a new reservation window near the allocation goal,
  1139. * or the beginning of the group, if there is no goal.
  1140. *
  1141. * We first find a reservable space after the goal, then from
  1142. * there, we check the bitmap for the first free block after
  1143. * it. If there is no free block until the end of group, then the
  1144. * whole group is full, we failed. Otherwise, check if the free
  1145. * block is inside the expected reservable space, if so, we
  1146. * succeed.
  1147. * If the first free block is outside the reservable space, then
1148. * starting from the first free block, we search for the next available
  1149. * space, and go on.
  1150. *
1151. * On success, a new reservation will be found and inserted into the list.
  1152. * It contains at least one free block, and it does not overlap with other
  1153. * reservation windows.
  1154. *
  1155. * failed: we failed to find a reservation window in this group
  1156. *
  1157. * @rsv: the reservation
  1158. *
  1159. * @grp_goal: The goal (group-relative). It is where the search for a
  1160. * free reservable space should start from.
1161. * if we have a grp_goal (grp_goal > 0), then start from there;
1162. * with no grp_goal (grp_goal = -1), we start from the first block
  1163. * of the group.
  1164. *
  1165. * @sb: the super block
  1166. * @group: the group we are trying to allocate in
  1167. * @bitmap_bh: the block group block bitmap
  1168. *
  1169. */
  1170. static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
  1171. ext4_grpblk_t grp_goal, struct super_block *sb,
  1172. ext4_group_t group, struct buffer_head *bitmap_bh)
  1173. {
  1174. struct ext4_reserve_window_node *search_head;
  1175. ext4_fsblk_t group_first_block, group_end_block, start_block;
  1176. ext4_grpblk_t first_free_block;
  1177. struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
  1178. unsigned long size;
  1179. int ret;
  1180. spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;
  1181. group_first_block = ext4_group_first_block_no(sb, group);
  1182. group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
  1183. if (grp_goal < 0)
  1184. start_block = group_first_block;
  1185. else
  1186. start_block = grp_goal + group_first_block;
  1187. size = my_rsv->rsv_goal_size;
  1188. if (!rsv_is_empty(&my_rsv->rsv_window)) {
  1189. /*
1190. * if the old reservation crosses the group boundary
  1191. * and if the goal is inside the old reservation window,
  1192. * we will come here when we just failed to allocate from
  1193. * the first part of the window. We still have another part
  1194. * that belongs to the next group. In this case, there is no
  1195. * point to discard our window and try to allocate a new one
1196. * in this group (which will fail). We should
1197. * keep the reservation window and simply move on.
  1198. *
  1199. * Maybe we could shift the start block of the reservation
  1200. * window to the first block of next group.
  1201. */
  1202. if ((my_rsv->rsv_start <= group_end_block) &&
  1203. (my_rsv->rsv_end > group_end_block) &&
  1204. (start_block >= my_rsv->rsv_start))
  1205. return -1;
  1206. if ((my_rsv->rsv_alloc_hit >
  1207. (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
  1208. /*
1209. * if the previous allocation hit ratio is
  1210. * greater than 1/2, then we double the size of
  1211. * the reservation window the next time,
  1212. * otherwise we keep the same size window
  1213. */
  1214. size = size * 2;
  1215. if (size > EXT4_MAX_RESERVE_BLOCKS)
  1216. size = EXT4_MAX_RESERVE_BLOCKS;
  1217. my_rsv->rsv_goal_size= size;
  1218. }
  1219. }
  1220. spin_lock(rsv_lock);
  1221. /*
  1222. * shift the search start to the window near the goal block
  1223. */
  1224. search_head = search_reserve_window(fs_rsv_root, start_block);
  1225. /*
  1226. * find_next_reservable_window() simply finds a reservable window
1227. * inside the given range (start_block, group_end_block).
  1228. *
  1229. * To make sure the reservation window has a free bit inside it, we
  1230. * need to check the bitmap after we found a reservable window.
  1231. */
  1232. retry:
  1233. ret = find_next_reservable_window(search_head, my_rsv, sb,
  1234. start_block, group_end_block);
  1235. if (ret == -1) {
  1236. if (!rsv_is_empty(&my_rsv->rsv_window))
  1237. rsv_window_remove(sb, my_rsv);
  1238. spin_unlock(rsv_lock);
  1239. return -1;
  1240. }
  1241. /*
  1242. * On success, find_next_reservable_window() returns the
  1243. * reservation window where there is a reservable space after it.
  1244. * Before we reserve this reservable space, we need
  1245. * to make sure there is at least a free block inside this region.
  1246. *
1247. * Search the first free bit on the block bitmap and the copy of the
1248. * last committed bitmap alternately, until we find an allocatable
1249. * block. The search starts from the start block of the reservable
1250. * space we just found.
  1251. */
  1252. spin_unlock(rsv_lock);
  1253. first_free_block = bitmap_search_next_usable_block(
  1254. my_rsv->rsv_start - group_first_block,
  1255. bitmap_bh, group_end_block - group_first_block + 1);
  1256. if (first_free_block < 0) {
  1257. /*
1258. * no free blocks left on the bitmap, so there is no point
1259. * in reserving the space. Return failure.
  1260. */
  1261. spin_lock(rsv_lock);
  1262. if (!rsv_is_empty(&my_rsv->rsv_window))
  1263. rsv_window_remove(sb, my_rsv);
  1264. spin_unlock(rsv_lock);
  1265. return -1; /* failed */
  1266. }
  1267. start_block = first_free_block + group_first_block;
  1268. /*
  1269. * check if the first free block is within the
  1270. * free space we just reserved
  1271. */
  1272. if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
  1273. return 0; /* success */
  1274. /*
  1275. * if the first free bit we found is out of the reservable space
  1276. * continue search for next reservable space,
  1277. * start from where the free block is,
  1278. * we also shift the list head to where we stopped last time
  1279. */
  1280. search_head = my_rsv;
  1281. spin_lock(rsv_lock);
  1282. goto retry;
  1283. }
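/*
 * Illustrative sketch: the sizing heuristic above doubles the goal window
 * size whenever more than half of the previous window was actually hit by
 * allocations, capped at EXT4_MAX_RESERVE_BLOCKS. The same policy in
 * isolation (the demo_* names are hypothetical and exist only for this sketch):
 */
static inline unsigned long demo_next_window_size(unsigned long cur_size,
						  unsigned long alloc_hit,
						  unsigned long window_blocks,
						  unsigned long max_size)
{
	if (alloc_hit > window_blocks / 2) {	/* hit ratio above 1/2: grow */
		cur_size *= 2;
		if (cur_size > max_size)
			cur_size = max_size;
	}
	return cur_size;			/* otherwise keep the same size */
}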
/**
 * try_to_extend_reservation()
 * @my_rsv:	given reservation window
 * @sb:		super block
 * @size:	the delta to extend by
 *
 * Attempt to expand the reservation window so that it is large enough to
 * hold the required number of free blocks.
 *
 * Since ext4_try_to_allocate() will always allocate blocks within
 * the reservation window range, if the window size is too small,
 * a multi-block allocation has to stop at the end of the reservation
 * window.  To make this more efficient, given the total number of
 * blocks needed and the current size of the window, we try to
 * expand the reservation window size if necessary on a best-effort
 * basis before ext4_new_blocks() tries to allocate blocks.
 */
static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
				      struct super_block *sb, int size)
{
	struct ext4_reserve_window_node *next_rsv;
	struct rb_node *next;
	spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

	if (!spin_trylock(rsv_lock))
		return;

	next = rb_next(&my_rsv->rsv_node);

	if (!next)
		my_rsv->rsv_end += size;
	else {
		next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);

		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
			my_rsv->rsv_end += size;
		else
			my_rsv->rsv_end = next_rsv->rsv_start - 1;
	}
	spin_unlock(rsv_lock);
}
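/*
 * Illustrative sketch (not part of the original balloc.c): the clamp that
 * try_to_extend_reservation() applies, shown on plain block numbers.
 * clamp_new_end() is a made-up name; kept out of the build with #if 0.
 */
#if 0
static int clamp_new_end(int my_end, int next_start, int size)
{
	int gap = next_start - my_end - 1;	/* free blocks up to the next window */

	/* Grow by the full request if it fits; otherwise grow right up to,
	 * but not into, the neighbouring reservation window. */
	if (gap >= size)
		return my_end + size;
	return next_start - 1;
}
#endif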
/**
 * ext4_try_to_allocate_with_rsv()
 * @sb:		superblock
 * @handle:	handle to this transaction
 * @group:	given allocation block group
 * @bitmap_bh:	buffer head holding the block bitmap
 * @grp_goal:	given target block within the group
 * @count:	target number of blocks to allocate
 * @my_rsv:	reservation window
 * @errp:	pointer to store the error code
 *
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, we first try to allocate from
 * the inode's own reservation window.  If the inode does not yet have a
 * reservation window, then instead of looking for a free bit in the bitmap
 * first and only then checking the reservation list to see whether that bit
 * falls inside somebody else's window, we try to allocate a reservation
 * window for the inode, starting from the goal, and then do the block
 * allocation within that window.
 *
 * This avoids repeatedly searching the reservation list when somebody is
 * looking for a free block (without a reservation) and there are lots of
 * free blocks, but they are all reserved.
 *
 * We use a red-black tree for the per-filesystem reservation list.
 *
 */
static ext4_grpblk_t
ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
			ext4_group_t group, struct buffer_head *bitmap_bh,
			ext4_grpblk_t grp_goal,
			struct ext4_reserve_window_node *my_rsv,
			unsigned long *count, int *errp)
{
	ext4_fsblk_t group_first_block, group_last_block;
	ext4_grpblk_t ret = 0;
	int fatal;
	unsigned long num = *count;

	*errp = 0;

	/*
	 * Make sure we use undo access for the bitmap, because it is critical
	 * that we do the frozen_data COW on bitmap buffers in all cases even
	 * if the buffer is in BJ_Forget state in the committing transaction.
	 */
	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
	fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (fatal) {
		*errp = fatal;
		return -1;
	}

	/*
	 * We don't deal with reservations when the filesystem is mounted
	 * without reservations, or the file is not a regular file, or the
	 * last attempt to allocate a block with reservations turned on
	 * failed.
	 */
	if (my_rsv == NULL) {
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
					   grp_goal, count, NULL);
		goto out;
	}

	/*
	 * grp_goal is a group-relative block number (if there is a goal):
	 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb).
	 * group_first_block is a filesystem-wide block number; it is the
	 * block number of the first block in this group.
	 */
	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	/*
	 * Basically we will allocate a new block from the inode's reservation
	 * window.
	 *
	 * We need to allocate a new reservation window, if:
	 * a) the inode does not have a reservation window; or
	 * b) the last attempt to allocate a block from the existing
	 *    reservation failed; or
	 * c) we come here with a goal that is outside the existing window.
	 *
	 * We do not need to allocate a new reservation window if we come here
	 * at the beginning with a goal that is inside the window, or if we
	 * don't have a goal but already have a reservation window; in either
	 * case we can allocate from the existing window directly.
	 */
	while (1) {
		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
		    !goal_in_my_reservation(&my_rsv->rsv_window,
					    grp_goal, group, sb)) {
			if (my_rsv->rsv_goal_size < *count)
				my_rsv->rsv_goal_size = *count;
			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
						    group, bitmap_bh);
			if (ret < 0)
				break;			/* failed */

			if (!goal_in_my_reservation(&my_rsv->rsv_window,
						    grp_goal, group, sb))
				grp_goal = -1;
		} else if (grp_goal >= 0) {
			int curr = my_rsv->rsv_end -
					(grp_goal + group_first_block) + 1;

			if (curr < *count)
				try_to_extend_reservation(my_rsv, sb,
							  *count - curr);
		}

		if ((my_rsv->rsv_start > group_last_block) ||
		    (my_rsv->rsv_end < group_first_block)) {
			rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
			BUG();
		}
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
					   grp_goal, &num,
					   &my_rsv->rsv_window);
		if (ret >= 0) {
			my_rsv->rsv_alloc_hit += num;
			*count = num;
			break;				/* succeeded */
		}
		num = *count;
	}
out:
	if (ret >= 0) {
		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
					"bitmap block");
		fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
		if (fatal) {
			*errp = fatal;
			return -1;
		}
		return ret;
	}

	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
	ext4_journal_release_buffer(handle, bitmap_bh);
	return ret;
}
/**
 * ext4_has_free_blocks()
 * @sbi: in-core super block structure.
 *
 * Check if the filesystem has at least 1 free block available for allocation.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t free_blocks, root_blocks;

	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	root_blocks = ext4_r_blocks_count(sbi->s_es);
	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
	    sbi->s_resuid != current->fsuid &&
	    (sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid))) {
		return 0;
	}
	return 1;
}
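/*
 * Illustrative sketch (not part of the original balloc.c): the policy
 * ext4_has_free_blocks() implements for an unprivileged caller, reduced
 * to plain numbers.  ordinary_user_may_allocate() is a made-up name;
 * kept out of the build with #if 0.
 */
#if 0
static int ordinary_user_may_allocate(ext4_fsblk_t free_blocks,
				      ext4_fsblk_t root_blocks)
{
	/* e.g. free_blocks = 40, root_blocks = 50: an ordinary user is
	 * refused, so the reserved blocks stay usable only by root (or
	 * by the configured resuid/resgid). */
	return free_blocks >= root_blocks + 1;
}
#endif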
/**
 * ext4_should_retry_alloc()
 * @sb: super block
 * @retries: number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
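/*
 * Illustrative sketch (not part of the original balloc.c): how a caller
 * would typically wrap an allocation in a retry loop around
 * ext4_should_retry_alloc().  do_allocation() is a hypothetical
 * placeholder for the caller's real work; kept out of the build with #if 0.
 */
#if 0
static int allocate_with_retry(handle_t *handle, struct inode *inode)
{
	int retries = 0;
	int err;

retry:
	err = do_allocation(handle, inode);	/* hypothetical helper */
	if (err == -ENOSPC &&
	    ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;			/* at most a few times */
	return err;
}
#endif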
/**
 * ext4_new_blocks_old() -- core block(s) allocation function
 * @handle:	handle to this transaction
 * @inode:	file inode
 * @goal:	given target block (filesystem-wide)
 * @count:	target number of blocks to allocate
 * @errp:	error code
 *
 * ext4_new_blocks_old() uses a goal block to assist allocation.  It first
 * tries to allocate block(s) from the block group that contains the goal
 * block.  If that fails, it will try to allocate block(s) from other block
 * groups without any specific goal block.
 *
 */
ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gdp_bh;
	ext4_group_t group_no;
	ext4_group_t goal_group;
	ext4_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
	ext4_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block */
	ext4_fsblk_t ret_block;		/* filesystem-wide allocated block */
	ext4_group_t bgi;		/* blockgroup iteration index */
	int fatal = 0, err;
	int performed_allocation = 0;
	ext4_grpblk_t free_blocks;	/* number of free blocks in a group */
	struct super_block *sb;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	struct ext4_reserve_window_node *my_rsv = NULL;
	struct ext4_block_alloc_info *block_i;
	unsigned short windowsz = 0;
	ext4_group_t ngroups;
	unsigned long num = *count;

	*errp = -ENOSPC;
	sb = inode->i_sb;
	if (!sb) {
		printk("ext4_new_block: nonexistent device");
		return 0;
	}

	/*
	 * Check quota for allocation of this block.
	 */
	if (DQUOT_ALLOC_BLOCK(inode, num)) {
		*errp = -EDQUOT;
		return 0;
	}

	sbi = EXT4_SB(sb);
	es = EXT4_SB(sb)->s_es;
	ext4_debug("goal=%llu.\n", goal);
	/*
	 * Allocate a block from the reservation only when the filesystem is
	 * mounted with reservations (the default, -o reservation), it's a
	 * regular file, and the desired window size is greater than 0 (one
	 * could use the ioctl command EXT4_IOC_SETRSVSZ to set the window
	 * size to 0 to turn off reservations on that particular file).
	 */
	block_i = EXT4_I(inode)->i_block_alloc_info;
	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
		my_rsv = &block_i->rsv_window_node;

	if (!ext4_has_free_blocks(sbi)) {
		*errp = -ENOSPC;
		goto out;
	}

	/*
	 * First, test whether the goal block is free.
	 */
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);
	ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
	goal_group = group_no;
retry_alloc:
	gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
	if (!gdp)
		goto io_error;

	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
	/*
	 * If there are not enough free blocks to make a new reservation,
	 * turn off reservations for this allocation.
	 */
	if (my_rsv && (free_blocks < windowsz)
		&& (rsv_is_empty(&my_rsv->rsv_window)))
		my_rsv = NULL;

	if (free_blocks > 0) {
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, grp_target_blk,
					my_rsv, &num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}

	ngroups = EXT4_SB(sb)->s_groups_count;
	smp_rmb();

	/*
	 * Now search the rest of the groups.  We assume that
	 * group_no and gdp correctly point to the last group visited.
	 */
	for (bgi = 0; bgi < ngroups; bgi++) {
		group_no++;
		if (group_no >= ngroups)
			group_no = 0;
		gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
		if (!gdp)
			goto io_error;
		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
		/*
		 * Skip this group if the number of free blocks is less
		 * than half of the reservation window size.
		 */
		if (free_blocks <= (windowsz / 2))
			continue;

		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		/*
		 * try to allocate block(s) from this group, without a goal (-1).
		 */
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, -1, my_rsv,
					&num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}
	/*
	 * We may have hit a bogus ENOSPC error earlier because the
	 * filesystem is "full" of reservations while there may in fact be
	 * free blocks available on disk.  In that case, forget about the
	 * reservations and just do the block allocation as if there were
	 * no reservations.
	 */
	if (my_rsv) {
		my_rsv = NULL;
		windowsz = 0;
		group_no = goal_group;
		goto retry_alloc;
	}
	/* No space left on the device */
	*errp = -ENOSPC;
	goto out;
allocated:

	ext4_debug("using block group %lu(%d)\n",
			group_no, gdp->bg_free_blocks_count);

	BUFFER_TRACE(gdp_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, gdp_bh);
	if (fatal)
		goto out;

	ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);

	if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ret_block, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group) ||
	    in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group)) {
		ext4_error(sb, "ext4_new_block",
			   "Allocating block in system zone - "
			   "blocks from %llu, length %lu",
			   ret_block, num);
		/*
		 * claim_block marked the blocks we allocated
		 * as in use. So we may want to selectively
		 * mark some of the blocks as free
		 */
		goto retry_alloc;
	}

	performed_allocation = 1;

#ifdef CONFIG_JBD2_DEBUG
	{
		struct buffer_head *debug_bh;

		/* Record bitmap buffer state in the newly allocated block */
		debug_bh = sb_find_get_block(sb, ret_block);
		if (debug_bh) {
			BUFFER_TRACE(debug_bh, "state when allocated");
			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
			brelse(debug_bh);
		}
	}
	jbd_lock_bh_state(bitmap_bh);
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
		int i;

		for (i = 0; i < num; i++) {
			if (ext4_test_bit(grp_alloc_blk+i,
					bh2jh(bitmap_bh)->b_committed_data)) {
				printk("%s: block was unexpectedly set in "
					"b_committed_data\n", __func__);
			}
		}
	}
	ext4_debug("found bit %d\n", grp_alloc_blk);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	jbd_unlock_bh_state(bitmap_bh);
#endif

	if (ret_block + num - 1 >= ext4_blocks_count(es)) {
		ext4_error(sb, "ext4_new_block",
			   "block(%llu) >= blocks count(%llu) - "
			   "block_group = %lu, es == %p ", ret_block,
			   ext4_blocks_count(es), group_no, es);
		goto out;
	}

	/*
	 * It is up to the caller to add the new buffer to a journal
	 * list of some description.  We don't know in advance whether
	 * the caller wants to use it as metadata or data.
	 */
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	percpu_counter_sub(&sbi->s_freeblocks_counter, num);

	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
	err = ext4_journal_dirty_metadata(handle, gdp_bh);
	if (!fatal)
		fatal = err;

	sb->s_dirt = 1;
	if (fatal)
		goto out;

	*errp = 0;
	brelse(bitmap_bh);
	DQUOT_FREE_BLOCK(inode, *count - num);
	*count = num;
	return ret_block;

io_error:
	*errp = -EIO;
out:
	if (fatal) {
		*errp = fatal;
		ext4_std_error(sb, fatal);
	}
	/*
	 * Undo the block allocation
	 */
	if (!performed_allocation)
		DQUOT_FREE_BLOCK(inode, *count);
	brelse(bitmap_bh);
	return 0;
}
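/*
 * Illustrative sketch (not part of the original balloc.c): the order in
 * which ext4_new_blocks_old() visits block groups after the goal group
 * fails.  Starting at the goal it walks every other group exactly once,
 * wrapping around at ngroups.  print_search_order() is a made-up helper;
 * kept out of the build with #if 0.
 */
#if 0
static void print_search_order(ext4_group_t goal_group, ext4_group_t ngroups)
{
	ext4_group_t group_no = goal_group;
	ext4_group_t bgi;

	for (bgi = 0; bgi < ngroups; bgi++) {
		group_no++;
		if (group_no >= ngroups)
			group_no = 0;
		printk(KERN_DEBUG "would try group %lu\n",
		       (unsigned long)group_no);
	}
}
#endif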
ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	if (!test_opt(inode->i_sb, MBALLOC)) {
		unsigned long count = 1;

		ret = ext4_new_blocks_old(handle, inode, goal, &count, errp);
		return ret;
	}

	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = 1;
	ret = ext4_mb_new_blocks(handle, &ar, errp);
	return ret;
}

ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	if (!test_opt(inode->i_sb, MBALLOC)) {
		ret = ext4_new_blocks_old(handle, inode, goal, count, errp);
		return ret;
	}

	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = *count;
	ret = ext4_mb_new_blocks(handle, &ar, errp);
	*count = ar.len;
	return ret;
}
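/*
 * Illustrative sketch (not part of the original balloc.c): how a caller
 * might ask ext4_new_blocks() for a small extent and honour the possibly
 * trimmed-down count.  alloc_extent_example() is a made-up name; kept out
 * of the build with #if 0.
 */
#if 0
static void alloc_extent_example(handle_t *handle, struct inode *inode,
				 ext4_fsblk_t goal)
{
	unsigned long count = 8;	/* ask for up to 8 contiguous blocks */
	ext4_fsblk_t first;
	int err;

	first = ext4_new_blocks(handle, inode, goal, &count, &err);
	if (first == 0)
		return;			/* err holds -ENOSPC, -EIO, ... */

	/* On success, count may be smaller than requested; blocks
	 * first .. first + count - 1 now belong to this inode. */
}
#endif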
/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb: superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned long x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n",
		ext4_free_blocks_count(es),
		desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
	}

	return desc_count;
#endif
}
static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;

	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
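/*
 * Illustrative sketch (not part of the original balloc.c): with
 * sparse_super, backup superblocks live only in groups 0, 1 and powers of
 * 3, 5 and 7 (0, 1, 3, 5, 7, 9, 25, 27, 49, 81, ...), which is exactly
 * what ext4_group_sparse() above encodes.  dump_sparse_groups() is a
 * made-up helper; kept out of the build with #if 0.
 */
#if 0
static void dump_sparse_groups(void)
{
	ext4_group_t group;

	for (group = 0; group < 100; group++)
		if (ext4_group_sparse(group))
			printk(KERN_DEBUG "group %lu holds a backup sb\n",
			       (unsigned long)group);
}
#endif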
/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
	    !ext4_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					  ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					    ext4_group_t group)
{
	return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}