/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "group.h"

/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
                ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        ext4_grpblk_t offset;

        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
        offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
        if (offsetp)
                *offsetp = offset;
        if (blockgrpp)
                *blockgrpp = blocknr;
}
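
/*
 * For example, assuming 4KB blocks (EXT4_BLOCKS_PER_GROUP(sb) == 32768)
 * and s_first_data_block == 0, block 100000 maps to group 3 at
 * group-relative offset 1696, since 100000 == 3 * 32768 + 1696.
 * Note that do_div() stores the quotient (the group number) back into
 * blocknr and returns the remainder (the offset).
 */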

/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
                ext4_group_t block_group, struct ext4_group_desc *gdp)
{
        int bit, bit_max;
        unsigned free_blocks, group_blocks;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (bh) {
                J_ASSERT_BH(bh, buffer_locked(bh));

                /* If the checksum is bad, mark all blocks used to prevent
                 * allocation, essentially implementing a per-group
                 * read-only flag. */
                if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
                        ext4_error(sb, __func__,
                                  "Checksum bad for group %lu\n", block_group);
                        gdp->bg_free_blocks_count = 0;
                        gdp->bg_free_inodes_count = 0;
                        gdp->bg_itable_unused = 0;
                        memset(bh->b_data, 0xff, sb->s_blocksize);
                        return 0;
                }
                memset(bh->b_data, 0, sb->s_blocksize);
        }

        /* Check for superblock and gdt backups in this group */
        bit_max = ext4_bg_has_super(sb, block_group);

        if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
            block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
                          sbi->s_desc_per_block) {
                if (bit_max) {
                        bit_max += ext4_bg_num_gdb(sb, block_group);
                        bit_max +=
                                le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
                }
        } else { /* For META_BG_BLOCK_GROUPS */
                int group_rel = (block_group -
                                 le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
                                EXT4_DESC_PER_BLOCK(sb);

                if (group_rel == 0 || group_rel == 1 ||
                    (group_rel == EXT4_DESC_PER_BLOCK(sb) - 1))
                        bit_max += 1;
        }

        if (block_group == sbi->s_groups_count - 1) {
                /*
                 * Even though mke2fs always initializes the first and last
                 * group, if some other tool enabled EXT4_BG_BLOCK_UNINIT we
                 * need to make sure we calculate the right free blocks.
                 */
                group_blocks = ext4_blocks_count(sbi->s_es) -
                        le32_to_cpu(sbi->s_es->s_first_data_block) -
                        (EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count - 1));
        } else {
                group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
        }

        free_blocks = group_blocks - bit_max;

        if (bh) {
                ext4_fsblk_t start;

                for (bit = 0; bit < bit_max; bit++)
                        ext4_set_bit(bit, bh->b_data);

                start = ext4_group_first_block_no(sb, block_group);

                /* Set bits for block and inode bitmaps, and inode table */
                ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
                ext4_set_bit(ext4_inode_bitmap(sb, gdp) - start, bh->b_data);
                for (bit = (ext4_inode_table(sb, gdp) - start),
                     bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++)
                        ext4_set_bit(bit, bh->b_data);

                /*
                 * If the number of blocks within the group is less than
                 * blocksize * 8 (the size of the bitmap), set the rest of
                 * the block bitmap to 1.
                 */
                mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
        }
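        /*
         * free_blocks already excludes the superblock/GDT backup blocks
         * counted in bit_max above; the "- 2" below covers the block
         * bitmap and inode bitmap blocks themselves, and s_itb_per_group
         * covers the inode table blocks.
         */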
        return free_blocks - sbi->s_itb_per_group - 2;
}

/*
 * The free blocks are managed by bitmaps. A file system contains several
 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block. Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block. The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
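
/*
 * in_range() tests inclusive bounds: for example, in_range(5, 4, 3)
 * is true because block 5 lies within [4, 6].
 */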

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
                                             ext4_group_t block_group,
                                             struct buffer_head ** bh)
{
        unsigned long group_desc;
        unsigned long offset;
        struct ext4_group_desc * desc;
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        if (block_group >= sbi->s_groups_count) {
                ext4_error (sb, "ext4_get_group_desc",
                            "block_group >= groups_count - "
                            "block_group = %lu, groups_count = %lu",
                            block_group, sbi->s_groups_count);

                return NULL;
        }
        smp_rmb();

        group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext4_error (sb, "ext4_get_group_desc",
                            "Group descriptor not loaded - "
                            "block_group = %lu, group_desc = %lu, desc = %lu",
                            block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext4_group_desc *)(
                (__u8 *)sbi->s_group_desc[group_desc]->b_data +
                offset * EXT4_DESC_SIZE(sb));
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc;
}
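
/*
 * Example of the indexing above: assuming 4KB blocks and 32-byte group
 * descriptors, EXT4_DESC_PER_BLOCK(sb) == 128, so block_group 300 is
 * found in descriptor block 300 >> 7 == 2 at offset 300 & 127 == 44.
 */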

static int ext4_valid_block_bitmap(struct super_block *sb,
                                        struct ext4_group_desc *desc,
                                        unsigned int block_group,
                                        struct buffer_head *bh)
{
        ext4_grpblk_t offset;
        ext4_grpblk_t next_zero_bit;
        ext4_fsblk_t bitmap_blk;
        ext4_fsblk_t group_first_block;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
                /* with FLEX_BG, the inode/block bitmaps and itable
                 * blocks may not be in the group at all, so the bitmap
                 * validation is skipped for those groups; otherwise we
                 * would also have to read the block group where the
                 * bitmaps are located to verify they are set.
                 */
                return 1;
        }
        group_first_block = ext4_group_first_block_no(sb, block_group);

        /* check whether the block bitmap block number is set */
        bitmap_blk = ext4_block_bitmap(sb, desc);
        offset = bitmap_blk - group_first_block;
        if (!ext4_test_bit(offset, bh->b_data))
                /* bad block bitmap */
                goto err_out;

        /* check whether the inode bitmap block number is set */
        bitmap_blk = ext4_inode_bitmap(sb, desc);
        offset = bitmap_blk - group_first_block;
        if (!ext4_test_bit(offset, bh->b_data))
                /* bad block bitmap */
                goto err_out;

        /* check whether the inode table block numbers are set */
        bitmap_blk = ext4_inode_table(sb, desc);
        offset = bitmap_blk - group_first_block;
        next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
                                offset + EXT4_SB(sb)->s_itb_per_group,
                                offset);
        if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
                /* good bitmap for inode tables */
                return 1;

err_out:
        ext4_error(sb, __func__,
                        "Invalid block bitmap - "
                        "block_group = %d, block = %llu",
                        block_group, bitmap_blk);
        return 0;
}
/**
 * read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode table blocks are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct ext4_group_desc * desc;
        struct buffer_head * bh = NULL;
        ext4_fsblk_t bitmap_blk;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return NULL;
        bitmap_blk = ext4_block_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_error(sb, __func__,
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %llu",
                            (int)block_group, (unsigned long long)bitmap_blk);
                return NULL;
        }
        if (bh_uptodate_or_lock(bh))
                return bh;

        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
                return bh;
        }
        if (bh_submit_read(bh) < 0) {
                put_bh(bh);
                ext4_error(sb, __func__,
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %llu",
                            (int)block_group, (unsigned long long)bitmap_blk);
                return NULL;
        }
        if (!ext4_valid_block_bitmap(sb, desc, block_group, bh)) {
                put_bh(bh);
                return NULL;
        }
        return bh;
}
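
/*
 * Note that the buffer_head returned by read_block_bitmap() carries a
 * reference; callers drop it with brelse()/put_bh() when done (see,
 * e.g., ext4_free_blocks_sb() below).
 */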

/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * We use a red-black tree to represent per-filesystem reservation
 * windows.
 *
 */

/**
 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
 * @rb_root:		root of per-filesystem reservation rb tree
 * @verbose:		verbose mode
 * @fn:			function which wishes to dump the reservation map
 *
 * If verbose is turned on, it will print the whole block reservation
 * windows (start, end). Otherwise, it will only print out the "bad" windows,
 * those windows that overlap with their immediate neighbors.
 */
#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
                              const char *fn)
{
        struct rb_node *n;
        struct ext4_reserve_window_node *rsv, *prev;
        int bad;

restart:
        n = rb_first(root);
        bad = 0;
        prev = NULL;

        printk("Block Allocation Reservation Windows Map (%s):\n", fn);
        while (n) {
                rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
                if (verbose)
                        printk("reservation window 0x%p "
                               "start: %llu, end: %llu\n",
                               rsv, rsv->rsv_start, rsv->rsv_end);
                if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
                        printk("Bad reservation %p (start >= end)\n",
                               rsv);
                        bad = 1;
                }
                if (prev && prev->rsv_end >= rsv->rsv_start) {
                        printk("Bad reservation %p (prev->end >= start)\n",
                               rsv);
                        bad = 1;
                }
                if (bad) {
                        if (!verbose) {
                                printk("Restarting reservation walk in verbose mode\n");
                                verbose = 1;
                                goto restart;
                        }
                }
                n = rb_next(n);
                prev = rsv;
        }
        printk("Window map complete.\n");
        if (bad)
                BUG();
}
#define rsv_window_dump(root, verbose) \
        __rsv_window_dump((root), (verbose), __func__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif

/**
 * goal_in_my_reservation()
 * @rsv:		inode's reservation window
 * @grp_goal:		given goal block relative to the allocation block group
 * @group:		the current allocation block group
 * @sb:			filesystem super block
 *
 * Test if the given goal block (group relative) is within the file's
 * own block reservation window range.
 *
 * If the reservation window is outside the goal allocation group, return 0;
 * grp_goal (given goal block) could be -1, which means no specific
 * goal block. In this case, always return 1.
 * If the goal block is within the reservation window, return 1;
 * otherwise, return 0.
 */
static int
goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
                        ext4_group_t group, struct super_block *sb)
{
        ext4_fsblk_t group_first_block, group_last_block;

        group_first_block = ext4_group_first_block_no(sb, group);
        group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

        if ((rsv->_rsv_start > group_last_block) ||
            (rsv->_rsv_end < group_first_block))
                return 0;
        if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
                || (grp_goal + group_first_block > rsv->_rsv_end)))
                return 0;
        return 1;
}
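
/*
 * For instance, if a group starts at block 32768 and the window is
 * [32800, 32900], a group-relative goal of 50 (absolute block 32818)
 * falls inside the window and the function returns 1.
 */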

/**
 * search_reserve_window()
 * @rb_root:		root of reservation tree
 * @goal:		target allocation block
 *
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext4_reserve_window_node *
search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
{
        struct rb_node *n = root->rb_node;
        struct ext4_reserve_window_node *rsv;

        if (!n)
                return NULL;

        do {
                rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);

                if (goal < rsv->rsv_start)
                        n = n->rb_left;
                else if (goal > rsv->rsv_end)
                        n = n->rb_right;
                else
                        return rsv;
        } while (n);
        /*
         * We've fallen off the end of the tree: the goal wasn't inside
         * any particular node.  OK, the previous node must be to one
         * side of the interval containing the goal.  If it's the RHS,
         * we need to back up one.
         */
        if (rsv->rsv_start > goal) {
                n = rb_prev(&rsv->rsv_node);
                rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
        }
        return rsv;
}
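
/*
 * For example, given windows [10, 20] and [30, 40] in the tree, a goal
 * of 25 lies in neither window, so the search returns the [10, 20]
 * node: the nearest window that starts before the goal.
 */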

/**
 * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
 * @sb:			super block
 * @rsv:		reservation window to add
 *
 * Must be called with rsv_lock held.
 */
void ext4_rsv_window_add(struct super_block *sb,
                    struct ext4_reserve_window_node *rsv)
{
        struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
        struct rb_node *node = &rsv->rsv_node;
        ext4_fsblk_t start = rsv->rsv_start;

        struct rb_node ** p = &root->rb_node;
        struct rb_node * parent = NULL;
        struct ext4_reserve_window_node *this;

        while (*p)
        {
                parent = *p;
                this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);

                if (start < this->rsv_start)
                        p = &(*p)->rb_left;
                else if (start > this->rsv_end)
                        p = &(*p)->rb_right;
                else {
                        rsv_window_dump(root, 1);
                        BUG();
                }
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
}

/**
 * rsv_window_remove() -- unlink a window from the reservation rb tree
 * @sb:			super block
 * @rsv:		reservation window to remove
 *
 * Mark the block reservation window as not allocated, and unlink it
 * from the filesystem reservation window rb tree. Must be called with
 * rsv_lock held.
 */
static void rsv_window_remove(struct super_block *sb,
                              struct ext4_reserve_window_node *rsv)
{
        rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_alloc_hit = 0;
        rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
}

/*
 * rsv_is_empty() -- Check if the reservation window is allocated.
 * @rsv:		given reservation window to check
 *
 * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
 */
static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
{
        /* a valid reservation end block could not be 0 */
        return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
}

/**
 * ext4_init_block_alloc_info()
 * @inode:		file inode structure
 *
 * Allocate and initialize the reservation window structure, and
 * finally link the window to the ext4 inode structure.
 *
 * The reservation window structure is only dynamically allocated
 * and linked to the ext4 inode the first time the open file
 * needs a new block. So, before every ext4_new_block(s) call, for
 * regular files, we should check whether the reservation window
 * structure exists or not. In the latter case, this function is called.
 * Failure to do so will result in block reservation being turned off for
 * that open file.
 *
 * This function is called from ext4_get_blocks_handle(), and also when
 * setting the reservation window size through ioctl before the file
 * is open for write (needs block allocation).
 *
 * Needs down_write(i_data_sem) protection prior to calling this function.
 */
void ext4_init_block_alloc_info(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
        struct super_block *sb = inode->i_sb;

        block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
        if (block_i) {
                struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;

                rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
                rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;

                /*
                 * if the filesystem is mounted with NORESERVATION, the goal
                 * reservation window size is set to zero to indicate
                 * block reservation is off
                 */
                if (!test_opt(sb, RESERVATION))
                        rsv->rsv_goal_size = 0;
                else
                        rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
                rsv->rsv_alloc_hit = 0;
                block_i->last_alloc_logical_block = 0;
                block_i->last_alloc_physical_block = 0;
        }
        ei->i_block_alloc_info = block_i;
}

/**
 * ext4_discard_reservation()
 * @inode:		inode
 *
 * Discard (free) the block reservation window on last file close,
 * on truncate, or at last iput().
 *
 * It is called in three cases:
 *	ext4_release_file(): last writer closes the file
 *	ext4_clear_inode(): last iput(), when nobody links to this file.
 *	ext4_truncate(): when the block indirect map is about to change.
 */
void ext4_discard_reservation(struct inode *inode)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
        struct ext4_reserve_window_node *rsv;
        spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;

        ext4_mb_discard_inode_preallocations(inode);

        if (!block_i)
                return;

        rsv = &block_i->rsv_window_node;
        if (!rsv_is_empty(&rsv->rsv_window)) {
                spin_lock(rsv_lock);
                if (!rsv_is_empty(&rsv->rsv_window))
                        rsv_window_remove(inode->i_sb, rsv);
                spin_unlock(rsv_lock);
        }
}

/**
 * ext4_free_blocks_sb() -- Free given blocks and update quota
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physical block to free
 * @count:			number of blocks to free
 * @pdquot_freed_blocks:	pointer to quota
 */
void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
                         ext4_fsblk_t block, unsigned long count,
                         unsigned long *pdquot_freed_blocks)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gd_bh;
        ext4_group_t block_group;
        ext4_grpblk_t bit;
        unsigned long i;
        unsigned long overflow;
        struct ext4_group_desc * desc;
        struct ext4_super_block * es;
        struct ext4_sb_info *sbi;
        int err = 0, ret;
        ext4_grpblk_t group_freed;

        *pdquot_freed_blocks = 0;
        sbi = EXT4_SB(sb);
        es = sbi->s_es;
        if (block < le32_to_cpu(es->s_first_data_block) ||
            block + count < block ||
            block + count > ext4_blocks_count(es)) {
                ext4_error (sb, "ext4_free_blocks",
                            "Freeing blocks not in datazone - "
                            "block = %llu, count = %lu", block, count);
                goto error_return;
        }

        ext4_debug ("freeing block(s) %llu-%llu\n", block, block + count - 1);

do_more:
        overflow = 0;
        ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
        /*
         * Check to see if we are freeing blocks across a group
         * boundary.
         */
        if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
                overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
                count -= overflow;
        }
        brelse(bitmap_bh);
        bitmap_bh = read_block_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;
        desc = ext4_get_group_desc (sb, block_group, &gd_bh);
        if (!desc)
                goto error_return;

        if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
            in_range(ext4_inode_bitmap(sb, desc), block, count) ||
            in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
            in_range(block + count - 1, ext4_inode_table(sb, desc),
                     sbi->s_itb_per_group)) {
                ext4_error (sb, "ext4_free_blocks",
                            "Freeing blocks in system zones - "
                            "Block = %llu, count = %lu",
                            block, count);
                goto error_return;
        }

        /*
         * We are about to start releasing blocks in the bitmap,
         * so we need undo access.
         */
        /* @@@ check errors */
        BUFFER_TRACE(bitmap_bh, "getting undo access");
        err = ext4_journal_get_undo_access(handle, bitmap_bh);
        if (err)
                goto error_return;

        /*
         * We are about to modify some metadata.  Call the journal APIs
         * to unshare ->b_data if a currently-committing transaction is
         * using it
         */
        BUFFER_TRACE(gd_bh, "get_write_access");
        err = ext4_journal_get_write_access(handle, gd_bh);
        if (err)
                goto error_return;

        jbd_lock_bh_state(bitmap_bh);

        for (i = 0, group_freed = 0; i < count; i++) {
                /*
                 * An HJ special.  This is expensive...
                 */
#ifdef CONFIG_JBD2_DEBUG
                jbd_unlock_bh_state(bitmap_bh);
                {
                        struct buffer_head *debug_bh;
                        debug_bh = sb_find_get_block(sb, block + i);
                        if (debug_bh) {
                                BUFFER_TRACE(debug_bh, "Deleted!");
                                if (!bh2jh(bitmap_bh)->b_committed_data)
                                        BUFFER_TRACE(debug_bh,
                                                "No committed data in bitmap");
                                BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
                                __brelse(debug_bh);
                        }
                }
                jbd_lock_bh_state(bitmap_bh);
#endif
                if (need_resched()) {
                        jbd_unlock_bh_state(bitmap_bh);
                        cond_resched();
                        jbd_lock_bh_state(bitmap_bh);
                }

                /* @@@ This prevents newly-allocated data from being
                 * freed and then reallocated within the same
                 * transaction.
                 *
                 * Ideally we would want to allow that to happen, but to
                 * do so requires making jbd2_journal_forget() capable of
                 * revoking the queued write of a data block, which
                 * implies blocking on the journal lock.  *forget()
                 * cannot block due to truncate races.
                 *
                 * Eventually we can fix this by making jbd2_journal_forget()
                 * return a status indicating whether or not it was able
                 * to revoke the buffer.  On successful revoke, it is
                 * safe not to set the allocation bit in the committed
                 * bitmap, because we know that there is no outstanding
                 * activity on the buffer any more and so it is safe to
                 * reallocate it.
                 */
                BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
                J_ASSERT_BH(bitmap_bh,
                                bh2jh(bitmap_bh)->b_committed_data != NULL);
                ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
                                bh2jh(bitmap_bh)->b_committed_data);

                /*
                 * We clear the bit in the bitmap after setting the committed
                 * data bit, because this is the reverse order to that which
                 * the allocator uses.
                 */
                BUFFER_TRACE(bitmap_bh, "clear bit");
                if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                                bit + i, bitmap_bh->b_data)) {
                        jbd_unlock_bh_state(bitmap_bh);
                        ext4_error(sb, __func__,
                                   "bit already cleared for block %llu",
                                   (ext4_fsblk_t)(block + i));
                        jbd_lock_bh_state(bitmap_bh);
                        BUFFER_TRACE(bitmap_bh, "bit already cleared");
                } else {
                        group_freed++;
                }
        }
        jbd_unlock_bh_state(bitmap_bh);

        spin_lock(sb_bgl_lock(sbi, block_group));
        le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
        desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
        spin_unlock(sb_bgl_lock(sbi, block_group));
        percpu_counter_add(&sbi->s_freeblocks_counter, count);

        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
        err = ext4_journal_dirty_metadata(handle, bitmap_bh);

        /* And the group descriptor block */
        BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
        ret = ext4_journal_dirty_metadata(handle, gd_bh);
        if (!err) err = ret;
        *pdquot_freed_blocks += group_freed;

        if (overflow && !err) {
                block += count;
                count = overflow;
                goto do_more;
        }
        sb->s_dirt = 1;
error_return:
        brelse(bitmap_bh);
        ext4_std_error(sb, err);
        return;
}

/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:		handle for this transaction
 * @inode:		inode
 * @block:		start physical block to free
 * @count:		number of blocks to free
 * @metadata:		Are these metadata blocks
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
                        ext4_fsblk_t block, unsigned long count,
                        int metadata)
{
        struct super_block * sb;
        unsigned long dquot_freed_blocks;

        /* this isn't the right place to decide whether a block is metadata;
         * inode.c/extents.c knows better, but for safety ... */
        if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
                        ext4_should_journal_data(inode))
                metadata = 1;

        sb = inode->i_sb;

        if (!test_opt(sb, MBALLOC) || !EXT4_SB(sb)->s_group_info)
                ext4_free_blocks_sb(handle, sb, block, count,
                                                &dquot_freed_blocks);
        else
                ext4_mb_free_blocks(handle, inode, block, count,
                                                metadata, &dquot_freed_blocks);
        if (dquot_freed_blocks)
                DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
        return;
}

/**
 * ext4_test_allocatable()
 * @nr:			given allocation block (group relative)
 * @bh:			bufferhead containing the bitmap of the given block group
 *
 * For ext4 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
{
        int ret;
        struct journal_head *jh = bh2jh(bh);

        if (ext4_test_bit(nr, bh->b_data))
                return 0;

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data)
                ret = 1;
        else
                ret = !ext4_test_bit(nr, jh->b_committed_data);
        jbd_unlock_bh_state(bh);
        return ret;
}

/**
 * bitmap_search_next_usable_block()
 * @start:		the starting block (group relative) of the search
 * @bh:			bufferhead containing the block group bitmap
 * @maxblocks:		the ending block (group relative) of the reservation
 *
 * The bitmap search --- search forward alternately through the actual
 * bitmap on disk and the last-committed copy in journal, until we find a
 * bit free in both bitmaps.
 */
static ext4_grpblk_t
bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
                        ext4_grpblk_t maxblocks)
{
        ext4_grpblk_t next;
        struct journal_head *jh = bh2jh(bh);

        while (start < maxblocks) {
                next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
                if (next >= maxblocks)
                        return -1;
                if (ext4_test_allocatable(next, bh))
                        return next;
                jbd_lock_bh_state(bh);
                if (jh->b_committed_data)
                        start = ext4_find_next_zero_bit(jh->b_committed_data,
                                                        maxblocks, next);
                jbd_unlock_bh_state(bh);
        }
        return -1;
}

/**
 * find_next_usable_block()
 * @start:		the starting block (group relative) to find the next
 *			allocatable block in the bitmap.
 * @bh:			bufferhead containing the block group bitmap
 * @maxblocks:		the ending block (group relative) for the search
 *
 * Find an allocatable block in a bitmap.  We honor both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext4_grpblk_t
find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
                        ext4_grpblk_t maxblocks)
{
        ext4_grpblk_t here, next;
        char *p, *r;

        if (start > 0) {
                /*
                 * The goal was occupied; search forward for a free
                 * block within the next XX blocks.
                 *
                 * end_goal is more or less random, but it has to be
                 * less than EXT4_BLOCKS_PER_GROUP. Aligning up to the
                 * next 64-bit boundary is simple..
                 */
                ext4_grpblk_t end_goal = (start + 63) & ~63;
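                /* e.g. a start of 100 rounds end_goal up to 128 */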
                if (end_goal > maxblocks)
                        end_goal = maxblocks;
                here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
                if (here < end_goal && ext4_test_allocatable(here, bh))
                        return here;
                ext4_debug("Bit not found near goal\n");
        }

        here = start;
        if (here < 0)
                here = 0;

        p = ((char *)bh->b_data) + (here >> 3);
        r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
        next = (r - ((char *)bh->b_data)) << 3;

        if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
                return next;

        /*
         * The bitmap search --- search forward alternately through the actual
         * bitmap and the last-committed copy until we find a bit free in
         * both
         */
        here = bitmap_search_next_usable_block(here, bh, maxblocks);
        return here;
}

/**
 * claim_block()
 * @block:		the free block (group relative) to allocate
 * @bh:			the bufferhead containing the block group bitmap
 *
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
{
        struct journal_head *jh = bh2jh(bh);
        int ret;

        if (ext4_set_bit_atomic(lock, block, bh->b_data))
                return 0;
        jbd_lock_bh_state(bh);
        if (jh->b_committed_data && ext4_test_bit(block, jh->b_committed_data)) {
                ext4_clear_bit_atomic(lock, block, bh->b_data);
                ret = 0;
        } else {
                ret = 1;
        }
        jbd_unlock_bh_state(bh);
        return ret;
}

/**
 * ext4_try_to_allocate()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holding the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 *
 * Attempt to allocate blocks within a given range. Set the range of allocation
 * first, then find the first free bit(s) from the bitmap (within the range),
 * and finally allocate the blocks by claiming the found free bit as allocated.
 *
 * To set the range of this allocation:
 *	if there is a reservation window, only try to allocate block(s) from
 *	the file's own reservation window;
 *	otherwise, the allocation range starts from the given goal block and
 *	ends at the block group's last block.
 *
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext4_journal_release_buffer(), else we'll run out of credits.
 */
static ext4_grpblk_t
ext4_try_to_allocate(struct super_block *sb, handle_t *handle,
                        ext4_group_t group, struct buffer_head *bitmap_bh,
                        ext4_grpblk_t grp_goal, unsigned long *count,
                        struct ext4_reserve_window *my_rsv)
{
        ext4_fsblk_t group_first_block;
        ext4_grpblk_t start, end;
        unsigned long num = 0;

        /* we do allocation within the reservation window if we have a window */
        if (my_rsv) {
                group_first_block = ext4_group_first_block_no(sb, group);
                if (my_rsv->_rsv_start >= group_first_block)
                        start = my_rsv->_rsv_start - group_first_block;
                else
                        /* reservation window crosses group boundary */
                        start = 0;
                end = my_rsv->_rsv_end - group_first_block + 1;
                if (end > EXT4_BLOCKS_PER_GROUP(sb))
                        /* reservation window crosses group boundary */
                        end = EXT4_BLOCKS_PER_GROUP(sb);
                if ((start <= grp_goal) && (grp_goal < end))
                        start = grp_goal;
                else
                        grp_goal = -1;
        } else {
                if (grp_goal > 0)
                        start = grp_goal;
                else
                        start = 0;
                end = EXT4_BLOCKS_PER_GROUP(sb);
        }

        BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));

repeat:
        if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
                grp_goal = find_next_usable_block(start, bitmap_bh, end);
                if (grp_goal < 0)
                        goto fail_access;
                if (!my_rsv) {
                        int i;

                        for (i = 0; i < 7 && grp_goal > start &&
                                        ext4_test_allocatable(grp_goal - 1,
                                                                bitmap_bh);
                                        i++, grp_goal--)
                                ;
                }
        }
        start = grp_goal;

        if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
                grp_goal, bitmap_bh)) {
                /*
                 * The block was allocated by another thread, or it was
                 * allocated and then freed by another thread
                 */
                start++;
                grp_goal++;
                if (start >= end)
                        goto fail_access;
                goto repeat;
        }
        num++;
        grp_goal++;
        while (num < *count && grp_goal < end
                && ext4_test_allocatable(grp_goal, bitmap_bh)
                && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
                                grp_goal, bitmap_bh)) {
                num++;
                grp_goal++;
        }
        *count = num;
        return grp_goal - num;
fail_access:
        *count = num;
        return -1;
}

/**
 * find_next_reservable_window():
 *		find a reservable space within the given range.
 *		It does not allocate the reservation window for now:
 *		alloc_new_reservation() will do the work later.
 *
 * @search_head: the head of the searching list;
 *		This is not necessarily the list head of the whole filesystem.
 *
 *		We have both head and start_block to assist the search
 *		for the reservable space. The list starts from head,
 *		but we will shift to the place where start_block is,
 *		then start from there, when looking for a reservable space.
 *
 * @size: the target new reservation window size
 *
 * @group_first_block: the first block we consider to start
 *			the real search from
 *
 * @last_block:
 *		the maximum block number that our goal reservable space
 *		could start from. This is normally the last block in this
 *		group. The search will end when we find that the start of
 *		the next possible reservable space is out of this boundary.
 *		This can handle a cross-boundary reservation window
 *		request.
 *
 *	Basically we search the given range (start_block, last_block),
 *	rather than the whole reservation double linked list,
 *	to find a free region that is of my size and has not
 *	been reserved.
 *
 */
static int find_next_reservable_window(
                                struct ext4_reserve_window_node *search_head,
                                struct ext4_reserve_window_node *my_rsv,
                                struct super_block * sb,
                                ext4_fsblk_t start_block,
                                ext4_fsblk_t last_block)
{
        struct rb_node *next;
        struct ext4_reserve_window_node *rsv, *prev;
        ext4_fsblk_t cur;
        int size = my_rsv->rsv_goal_size;

        /* TODO: make the start of the reservation window byte-aligned */
        /* cur = *start_block & ~7;*/
        cur = start_block;
        rsv = search_head;
        if (!rsv)
                return -1;

        while (1) {
                if (cur <= rsv->rsv_end)
                        cur = rsv->rsv_end + 1;

                /* TODO?
                 * in the case we could not find a reservable space
                 * of the expected size, during the re-search, we could
                 * remember what the largest reservable space we could have
                 * is, and return that one.
                 *
                 * For now it will fail if we could not find the reservable
                 * space with the expected size (or more)...
                 */
                if (cur > last_block)
                        return -1;		/* fail */

                prev = rsv;
                next = rb_next(&rsv->rsv_node);
                rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);

                /*
                 * Reached the last reservation, we can just append to the
                 * previous one.
                 */
                if (!next)
                        break;

                if (cur + size <= rsv->rsv_start) {
                        /*
                         * Found a reservable space big enough.  We could
                         * have a reservation across the group boundary here
                         */
                        break;
                }
        }
        /*
         * we come here either:
         * when we reach the end of the whole list, and there is empty
         * reservable space after the last entry in the list: append it
         * to the end of the list;
         *
         * or when we found a reservable space in the middle of the list:
         * return the reservation window that we could append to.
         * succeed.
         */

        if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
                rsv_window_remove(sb, my_rsv);

        /*
         * Let's book the whole available window for now.  We will check the
         * disk bitmap later and then, if there are free blocks then we adjust
         * the window size if it's larger than requested.
         * Otherwise, we will remove this node from the tree next time
         * find_next_reservable_window is called.
         */
        my_rsv->rsv_start = cur;
        my_rsv->rsv_end = cur + size - 1;
        my_rsv->rsv_alloc_hit = 0;

        if (prev != my_rsv)
                ext4_rsv_window_add(sb, my_rsv);

        return 0;
}

/**
 * alloc_new_reservation()--allocate a new reservation window
 *
 * To make a new reservation, we search part of the filesystem
 * reservation list (the list inside the group). We try to
 * allocate a new reservation window near the allocation goal,
 * or at the beginning of the group, if there is no goal.
 *
 * We first find a reservable space after the goal, then from
 * there, we check the bitmap for the first free block after
 * it. If there is no free block until the end of the group, then the
 * whole group is full: we failed. Otherwise, check if the free
 * block is inside the expected reservable space; if so, we
 * succeed.
 * If the first free block is outside the reservable space, then
 * starting from the first free block, we search for the next available
 * space, and go on.
 *
 * on success, a new reservation will be found and inserted into the list.
 * It contains at least one free block, and it does not overlap with other
 * reservation windows.
 *
 * failed: we failed to find a reservation window in this group
 *
 * @rsv: the reservation
 *
 * @grp_goal: The goal (group-relative).  It is where the search for a
 *	free reservable space should start from.
 *	If we have a grp_goal (grp_goal > 0), then start from there;
 *	with no grp_goal (grp_goal = -1), we start from the first block
 *	of the group.
 *
 * @sb: the super block
 * @group: the group we are trying to allocate in
 * @bitmap_bh: the block group block bitmap
 *
 */
static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
                ext4_grpblk_t grp_goal, struct super_block *sb,
                ext4_group_t group, struct buffer_head *bitmap_bh)
{
        struct ext4_reserve_window_node *search_head;
        ext4_fsblk_t group_first_block, group_end_block, start_block;
        ext4_grpblk_t first_free_block;
        struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
        unsigned long size;
        int ret;
        spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

        group_first_block = ext4_group_first_block_no(sb, group);
        group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

        if (grp_goal < 0)
                start_block = group_first_block;
        else
                start_block = grp_goal + group_first_block;

        size = my_rsv->rsv_goal_size;

        if (!rsv_is_empty(&my_rsv->rsv_window)) {
                /*
                 * if the old reservation crosses the group boundary
                 * and if the goal is inside the old reservation window,
                 * we will come here when we just failed to allocate from
                 * the first part of the window. We still have another part
                 * that belongs to the next group. In this case, there is no
                 * point in discarding our window and trying to allocate a
                 * new one in this group (which will fail). We should keep
                 * the reservation window and simply move on.
                 *
                 * Maybe we could shift the start block of the reservation
                 * window to the first block of the next group.
                 */
                if ((my_rsv->rsv_start <= group_end_block) &&
                                (my_rsv->rsv_end > group_end_block) &&
                                (start_block >= my_rsv->rsv_start))
                        return -1;

                if ((my_rsv->rsv_alloc_hit >
                     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
                        /*
                         * if the previous allocation hit ratio is
                         * greater than 1/2, then we double the size of
                         * the reservation window the next time,
                         * otherwise we keep the same size window
                         */
                        size = size * 2;
                        if (size > EXT4_MAX_RESERVE_BLOCKS)
                                size = EXT4_MAX_RESERVE_BLOCKS;
                        my_rsv->rsv_goal_size = size;
                }
        }

        spin_lock(rsv_lock);
        /*
         * shift the search start to the window near the goal block
         */
        search_head = search_reserve_window(fs_rsv_root, start_block);

        /*
         * find_next_reservable_window() simply finds a reservable window
         * inside the given range (start_block, group_end_block).
         *
         * To make sure the reservation window has a free bit inside it, we
         * need to check the bitmap after we found a reservable window.
         */
retry:
        ret = find_next_reservable_window(search_head, my_rsv, sb,
                                                start_block, group_end_block);

        if (ret == -1) {
                if (!rsv_is_empty(&my_rsv->rsv_window))
                        rsv_window_remove(sb, my_rsv);
                spin_unlock(rsv_lock);
                return -1;
        }

        /*
         * On success, find_next_reservable_window() returns the
         * reservation window where there is a reservable space after it.
         * Before we reserve this reservable space, we need
         * to make sure there is at least one free block inside this region.
         *
         * Search the first free bit on the block bitmap and the copy of the
         * last committed bitmap alternately, until we find an allocatable
         * block. The search starts from the start block of the reservable
         * space we just found.
         */
        spin_unlock(rsv_lock);
        first_free_block = bitmap_search_next_usable_block(
                        my_rsv->rsv_start - group_first_block,
                        bitmap_bh, group_end_block - group_first_block + 1);

        if (first_free_block < 0) {
                /*
                 * no free block left on the bitmap, no point
                 * in reserving the space. return failed.
                 */
                spin_lock(rsv_lock);
                if (!rsv_is_empty(&my_rsv->rsv_window))
                        rsv_window_remove(sb, my_rsv);
                spin_unlock(rsv_lock);
                return -1;		/* failed */
        }

        start_block = first_free_block + group_first_block;
        /*
         * check if the first free block is within the
         * free space we just reserved
         */
        if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
                return 0;		/* success */
        /*
         * if the first free bit we found is out of the reservable space,
         * continue the search for the next reservable space,
         * starting from where the free block is;
         * we also shift the list head to where we stopped last time
         */
        search_head = my_rsv;
        spin_lock(rsv_lock);
        goto retry;
}

/**
 * try_to_extend_reservation()
 * @my_rsv:		given reservation window
 * @sb:			super block
 * @size:		the delta to extend
 *
 * Attempt to expand the reservation window large enough to have the
 * required number of free blocks.
 *
 * Since ext4_try_to_allocate() will always allocate blocks within
 * the reservation window range, if the window size is too small,
 * multiple block allocation has to stop at the end of the reservation
 * window. To make this more efficient, given the total number of
 * blocks needed and the current size of the window, we try to
 * expand the reservation window size if necessary on a best-effort
 * basis before ext4_new_blocks() tries to allocate blocks.
 */
static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
                        struct super_block *sb, int size)
{
        struct ext4_reserve_window_node *next_rsv;
        struct rb_node *next;
        spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

        if (!spin_trylock(rsv_lock))
                return;

        next = rb_next(&my_rsv->rsv_node);

        if (!next)
                my_rsv->rsv_end += size;
        else {
                next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);

                if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
                        my_rsv->rsv_end += size;
                else
                        my_rsv->rsv_end = next_rsv->rsv_start - 1;
        }
        spin_unlock(rsv_lock);
}
/**
 * ext4_try_to_allocate_with_rsv()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @my_rsv:		reservation window
 * @count:		target number of blocks to allocate
 * @errp:		pointer to store the error code
 *
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, we first try to allocate from
 * the inode's own reservation window. If the inode does not have a window
 * yet, then instead of first looking for a free bit on the bitmap and then
 * walking the reservation list to see whether that bit falls inside somebody
 * else's window, we try to allocate a reservation window for the inode,
 * starting from the goal, and then do the block allocation within that
 * window.
 *
 * This avoids searching the reservation list again and again when somebody
 * is looking for a free block (without a reservation) and there are lots of
 * free blocks, but they are all being reserved.
 *
 * We use a red-black tree for the per-filesystem reservation list.
 */
static ext4_grpblk_t
ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
			ext4_group_t group, struct buffer_head *bitmap_bh,
			ext4_grpblk_t grp_goal,
			struct ext4_reserve_window_node *my_rsv,
			unsigned long *count, int *errp)
{
	ext4_fsblk_t group_first_block, group_last_block;
	ext4_grpblk_t ret = 0;
	int fatal;
	unsigned long num = *count;

	*errp = 0;

	/*
	 * Make sure we use undo access for the bitmap, because it is critical
	 * that we do the frozen_data COW on bitmap buffers in all cases even
	 * if the buffer is in BJ_Forget state in the committing transaction.
	 */
	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
	fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (fatal) {
		*errp = fatal;
		return -1;
	}

	/*
	 * We don't deal with reservations when the filesystem is mounted
	 * without reservations, or the file is not a regular file, or the
	 * last attempt to allocate a block with reservations turned on
	 * failed.
	 */
	if (my_rsv == NULL) {
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
						grp_goal, count, NULL);
		goto out;
	}
	/*
	 * grp_goal is a group-relative block number (if there is a goal):
	 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb).
	 * group_first_block is a filesystem-wide block number; it is the
	 * number of the first block in this group.
	 */
	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	/*
	 * Basically we will allocate a new block from the inode's reservation
	 * window.
	 *
	 * We need to allocate a new reservation window if:
	 * a) the inode does not have a reservation window; or
	 * b) the last attempt to allocate a block from the existing
	 *    reservation failed; or
	 * c) we come here with a goal that is outside the existing window.
	 *
	 * We do not need to allocate a new reservation window if we come here
	 * at the beginning with a goal and the goal is inside the window, or
	 * we don't have a goal but already have a reservation window; in
	 * those cases we can allocate from the reservation window directly.
	 */
	while (1) {
		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
			!goal_in_my_reservation(&my_rsv->rsv_window,
						grp_goal, group, sb)) {
			if (my_rsv->rsv_goal_size < *count)
				my_rsv->rsv_goal_size = *count;
			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
							group, bitmap_bh);
			if (ret < 0)
				break;			/* failed */

			if (!goal_in_my_reservation(&my_rsv->rsv_window,
							grp_goal, group, sb))
				grp_goal = -1;
		} else if (grp_goal >= 0) {
			int curr = my_rsv->rsv_end -
					(grp_goal + group_first_block) + 1;

			if (curr < *count)
				try_to_extend_reservation(my_rsv, sb,
							*count - curr);
		}

		if ((my_rsv->rsv_start > group_last_block) ||
				(my_rsv->rsv_end < group_first_block)) {
			rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
			BUG();
		}
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
					grp_goal, &num, &my_rsv->rsv_window);
		if (ret >= 0) {
			my_rsv->rsv_alloc_hit += num;
			*count = num;
			break;				/* succeed */
		}
		num = *count;
	}
out:
	if (ret >= 0) {
		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
					"bitmap block");
		fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
		if (fatal) {
			*errp = fatal;
			return -1;
		}
		return ret;
	}

	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
	ext4_journal_release_buffer(handle, bitmap_bh);
	return ret;
}
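
/*
 * Control-flow sketch of the loop above (illustrative, not part of the
 * original file):
 *
 *	while (1):
 *		window empty, last try failed, or goal outside window?
 *			-> alloc_new_reservation() (after raising
 *			   rsv_goal_size to at least *count)
 *		else, window too small for a goal-based request?
 *			-> try_to_extend_reservation()
 *		ext4_try_to_allocate() within [rsv_start, rsv_end]
 *			success -> bump rsv_alloc_hit, return the block
 *			failure -> loop again with a fresh window
 */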
/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 *
 * Check whether the filesystem has at least one free block available to
 * the caller, taking the root-reserved blocks into account.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t free_blocks, root_blocks;

	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	root_blocks = ext4_r_blocks_count(sbi->s_es);
	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
		sbi->s_resuid != current->fsuid &&
		(sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid))) {
		return 0;
	}
	return 1;
}
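
/*
 * Worked example (illustrative, not part of the original file): with 50
 * root-reserved blocks, an unprivileged task whose fsuid/fsgid do not
 * match s_resuid/s_resgid can allocate only while at least 51 free blocks
 * remain (free_blocks >= root_blocks + 1).  Once the counter drops to 50,
 * further allocations are refused unless the task has CAP_SYS_RESOURCE.
 */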
/**
 * ext4_should_retry_alloc()
 * @sb:		super block
 * @retries:	number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned; if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
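
/*
 * Typical caller pattern (an illustrative sketch, not part of the original
 * file; do_allocation() is a hypothetical helper): retry after ENOSPC,
 * since a forced commit may release blocks held by the committing
 * transaction.
 *
 *	int err, retries = 0;
 * retry:
 *	err = do_allocation(handle, inode);	// hypothetical helper
 *	if (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
 *		goto retry;
 */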
/**
 * ext4_new_blocks_old() -- core block(s) allocation function
 * @handle:	handle to this transaction
 * @inode:	file inode
 * @goal:	given target block (filesystem wide)
 * @count:	target number of blocks to allocate
 * @errp:	error code
 *
 * ext4_new_blocks_old() uses a goal block to assist allocation. It first
 * tries to allocate block(s) from the block group that contains the goal
 * block. If that fails, it will try to allocate block(s) from other block
 * groups without any specific goal block.
 */
ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gdp_bh;
	ext4_group_t group_no;
	ext4_group_t goal_group;
	ext4_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
	ext4_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block*/
	ext4_fsblk_t ret_block;		/* filesystem-wide allocated block */
	ext4_group_t bgi;		/* blockgroup iteration index */
	int fatal = 0, err;
	int performed_allocation = 0;
	ext4_grpblk_t free_blocks;	/* number of free blocks in a group */
	struct super_block *sb;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	struct ext4_reserve_window_node *my_rsv = NULL;
	struct ext4_block_alloc_info *block_i;
	unsigned short windowsz = 0;
	ext4_group_t ngroups;
	unsigned long num = *count;

	*errp = -ENOSPC;
	sb = inode->i_sb;
	if (!sb) {
		printk("ext4_new_block: nonexistent device");
		return 0;
	}

	/*
	 * Check quota for allocation of this block.
	 */
	if (DQUOT_ALLOC_BLOCK(inode, num)) {
		*errp = -EDQUOT;
		return 0;
	}

	sbi = EXT4_SB(sb);
	es = EXT4_SB(sb)->s_es;
	ext4_debug("goal=%llu.\n", goal);
	/*
	 * Allocate a block from a reservation only when the filesystem is
	 * mounted with reservations (the default, -o reservation), the file
	 * is a regular file, and the desired window size is greater than 0
	 * (one can use the ioctl command EXT4_IOC_SETRSVSZ to set the window
	 * size to 0 to turn off reservations for that particular file).
	 */
	block_i = EXT4_I(inode)->i_block_alloc_info;
	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
		my_rsv = &block_i->rsv_window_node;

	if (!ext4_has_free_blocks(sbi)) {
		*errp = -ENOSPC;
		goto out;
	}

	/*
	 * First, test whether the goal block is free.
	 */
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);
	ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
	goal_group = group_no;
retry_alloc:
	gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
	if (!gdp)
		goto io_error;

	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
	/*
	 * If there are not enough free blocks to make a new reservation,
	 * turn off reservations for this allocation.
	 */
	if (my_rsv && (free_blocks < windowsz)
		&& (rsv_is_empty(&my_rsv->rsv_window)))
		my_rsv = NULL;

	if (free_blocks > 0) {
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, grp_target_blk,
					my_rsv, &num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}

	ngroups = EXT4_SB(sb)->s_groups_count;
	smp_rmb();

	/*
	 * Now search the rest of the groups. We assume that
	 * group_no and gdp correctly point to the last group visited.
	 */
	for (bgi = 0; bgi < ngroups; bgi++) {
		group_no++;
		if (group_no >= ngroups)
			group_no = 0;
		gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
		if (!gdp)
			goto io_error;
		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
		/*
		 * Skip this group if the number of free blocks is less
		 * than half of the reservation window size.
		 */
		if (free_blocks <= (windowsz/2))
			continue;

		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		/*
		 * Try to allocate block(s) from this group, without a
		 * goal (-1).
		 */
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, -1, my_rsv,
					&num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}
	/*
	 * We may end up with a bogus ENOSPC error because the filesystem is
	 * "full" of reservations while there are in fact free blocks
	 * available on disk. In this case, just forget about the
	 * reservations and do the block allocation as if there were none.
	 */
	if (my_rsv) {
		my_rsv = NULL;
		windowsz = 0;
		group_no = goal_group;
		goto retry_alloc;
	}
	/* No space left on the device */
	*errp = -ENOSPC;
	goto out;

allocated:

	ext4_debug("using block group %lu(%d)\n",
			group_no, gdp->bg_free_blocks_count);

	BUFFER_TRACE(gdp_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, gdp_bh);
	if (fatal)
		goto out;

	ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);

	if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ret_block, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group) ||
	    in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group)) {
		ext4_error(sb, "ext4_new_block",
			    "Allocating block in system zone - "
			    "blocks from %llu, length %lu",
			    ret_block, num);
		goto out;
	}

	performed_allocation = 1;

#ifdef CONFIG_JBD2_DEBUG
	{
		struct buffer_head *debug_bh;

		/* Record bitmap buffer state in the newly allocated block */
		debug_bh = sb_find_get_block(sb, ret_block);
		if (debug_bh) {
			BUFFER_TRACE(debug_bh, "state when allocated");
			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
			brelse(debug_bh);
		}
	}
	jbd_lock_bh_state(bitmap_bh);
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
		int i;

		for (i = 0; i < num; i++) {
			if (ext4_test_bit(grp_alloc_blk+i,
					bh2jh(bitmap_bh)->b_committed_data)) {
				printk("%s: block was unexpectedly set in "
					"b_committed_data\n", __func__);
			}
		}
	}
	ext4_debug("found bit %d\n", grp_alloc_blk);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	jbd_unlock_bh_state(bitmap_bh);
#endif

	if (ret_block + num - 1 >= ext4_blocks_count(es)) {
		ext4_error(sb, "ext4_new_block",
			    "block(%llu) >= blocks count(%llu) - "
			    "block_group = %lu, es == %p ", ret_block,
			    ext4_blocks_count(es), group_no, es);
		goto out;
	}

	/*
	 * It is up to the caller to add the new buffer to a journal
	 * list of some description. We don't know in advance whether
	 * the caller wants to use it as metadata or data.
	 */
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	percpu_counter_sub(&sbi->s_freeblocks_counter, num);

	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
	err = ext4_journal_dirty_metadata(handle, gdp_bh);
	if (!fatal)
		fatal = err;

	sb->s_dirt = 1;
	if (fatal)
		goto out;

	*errp = 0;
	brelse(bitmap_bh);
	DQUOT_FREE_BLOCK(inode, *count-num);
	*count = num;
	return ret_block;

io_error:
	*errp = -EIO;
out:
	if (fatal) {
		*errp = fatal;
		ext4_std_error(sb, fatal);
	}
	/*
	 * Undo the block allocation
	 */
	if (!performed_allocation)
		DQUOT_FREE_BLOCK(inode, *count);
	brelse(bitmap_bh);
	return 0;
}
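
/*
 * Minimal caller sketch (illustrative, not part of the original file):
 * request up to 8 blocks near `goal`; on success the function returns the
 * first allocated block and rewrites `count` with how many contiguous
 * blocks were actually obtained.
 *
 *	unsigned long count = 8;
 *	int err;
 *	ext4_fsblk_t first = ext4_new_blocks_old(handle, inode, goal,
 *						 &count, &err);
 *	if (!first)
 *		return err;	// e.g. -ENOSPC, -EDQUOT or -EIO
 */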
ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
		ext4_fsblk_t goal, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	if (!test_opt(inode->i_sb, MBALLOC)) {
		unsigned long count = 1;

		ret = ext4_new_blocks_old(handle, inode, goal, &count, errp);
		return ret;
	}

	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = 1;
	ret = ext4_mb_new_blocks(handle, &ar, errp);
	return ret;
}

ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
		ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	if (!test_opt(inode->i_sb, MBALLOC)) {
		ret = ext4_new_blocks_old(handle, inode, goal, count, errp);
		return ret;
	}

	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = *count;
	ret = ext4_mb_new_blocks(handle, &ar, errp);
	*count = ar.len;
	return ret;
}
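
/*
 * Caller sketch (illustrative, not part of the original file): the wrapper
 * picks the allocator at run time, so callers are unaffected by whether
 * the filesystem is mounted with -o mballoc.
 *
 *	unsigned long want = 16;
 *	int err;
 *	ext4_fsblk_t blk = ext4_new_blocks(handle, inode, goal, &want, &err);
 *	if (!blk)
 *		return err;
 *	// `want` now holds the number of blocks actually allocated
 */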
/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned long x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	smp_rmb();

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n",
		ext4_free_blocks_count(es),
		desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
	}

	return desc_count;
#endif
}
static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}
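
/*
 * Worked example (illustrative; the 32-byte descriptor size is an
 * assumption, not stated in the original file): with 4KiB blocks and
 * 32-byte descriptors, EXT4_DESC_PER_BLOCK(sb) is 128, so metagroup 1
 * spans groups 128..255.  Only the first group (128), the second (129)
 * and the last (255) carry a backup of that metagroup's single descriptor
 * block; every other group in the range carries none.
 */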
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb:		superblock for filesystem
 * @group:	group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}
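
/*
 * Summary example (illustrative, not part of the original file): without
 * META_BG, a group that carries a backup superblock (see
 * ext4_bg_has_super()) also carries a full copy of the descriptor table,
 * i.e. s_gdb_count blocks; all other groups carry zero.  With META_BG,
 * groups at or beyond s_first_meta_bg instead use the per-metagroup
 * layout computed by ext4_bg_num_gdb_meta() above.
 */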