/*
 *  linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */


#define EXT4FS_DEBUG

#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/ext4_jbd2.h>

#include <linux/errno.h>
#include <linux/slab.h>


#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))
static int verify_group_input(struct super_block *sb,
                              struct ext4_new_group_data *input)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        ext4_fsblk_t start = le32_to_cpu(es->s_blocks_count);
        ext4_fsblk_t end = start + input->blocks_count;
        unsigned group = input->group;
        ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
        unsigned overhead = ext4_bg_has_super(sb, group) ?
                (1 + ext4_bg_num_gdb(sb, group) +
                 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
        ext4_fsblk_t metaend = start + overhead;
        struct buffer_head *bh = NULL;
        ext4_grpblk_t free_blocks_count, offset;
        int err = -EINVAL;

        input->free_blocks_count = free_blocks_count =
                input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

        if (test_opt(sb, DEBUG))
                printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
                       "(%d free, %u reserved)\n",
                       ext4_bg_has_super(sb, input->group) ? "normal" :
                       "no-super", input->group, input->blocks_count,
                       free_blocks_count, input->reserved_blocks);

        ext4_get_group_no_and_offset(sb, start, NULL, &offset);
        if (group != sbi->s_groups_count)
                ext4_warning(sb, __FUNCTION__,
                             "Cannot add at group %u (only %lu groups)",
                             input->group, sbi->s_groups_count);
        else if (offset != 0)
                ext4_warning(sb, __FUNCTION__, "Last group not full");
        else if (input->reserved_blocks > input->blocks_count / 5)
                ext4_warning(sb, __FUNCTION__, "Reserved blocks too high (%u)",
                             input->reserved_blocks);
        else if (free_blocks_count < 0)
                ext4_warning(sb, __FUNCTION__, "Bad blocks count %u",
                             input->blocks_count);
        else if (!(bh = sb_bread(sb, end - 1)))
                ext4_warning(sb, __FUNCTION__,
                             "Cannot read last block ("E3FSBLK")",
                             end - 1);
        else if (outside(input->block_bitmap, start, end))
                ext4_warning(sb, __FUNCTION__,
                             "Block bitmap not in group (block %u)",
                             input->block_bitmap);
        else if (outside(input->inode_bitmap, start, end))
                ext4_warning(sb, __FUNCTION__,
                             "Inode bitmap not in group (block %u)",
                             input->inode_bitmap);
        else if (outside(input->inode_table, start, end) ||
                 outside(itend - 1, start, end))
                ext4_warning(sb, __FUNCTION__,
                             "Inode table not in group (blocks %u-"E3FSBLK")",
                             input->inode_table, itend - 1);
        else if (input->inode_bitmap == input->block_bitmap)
                ext4_warning(sb, __FUNCTION__,
                             "Block bitmap same as inode bitmap (%u)",
                             input->block_bitmap);
        else if (inside(input->block_bitmap, input->inode_table, itend))
                ext4_warning(sb, __FUNCTION__,
                             "Block bitmap (%u) in inode table (%u-"E3FSBLK")",
                             input->block_bitmap, input->inode_table,
                             itend - 1);
        else if (inside(input->inode_bitmap, input->inode_table, itend))
                ext4_warning(sb, __FUNCTION__,
                             "Inode bitmap (%u) in inode table (%u-"E3FSBLK")",
                             input->inode_bitmap, input->inode_table,
                             itend - 1);
        else if (inside(input->block_bitmap, start, metaend))
                ext4_warning(sb, __FUNCTION__,
                             "Block bitmap (%u) in GDT table"
                             " ("E3FSBLK"-"E3FSBLK")",
                             input->block_bitmap, start, metaend - 1);
        else if (inside(input->inode_bitmap, start, metaend))
                ext4_warning(sb, __FUNCTION__,
                             "Inode bitmap (%u) in GDT table"
                             " ("E3FSBLK"-"E3FSBLK")",
                             input->inode_bitmap, start, metaend - 1);
        else if (inside(input->inode_table, start, metaend) ||
                 inside(itend - 1, start, metaend))
                ext4_warning(sb, __FUNCTION__,
                             "Inode table (%u-"E3FSBLK") overlaps "
                             "GDT table ("E3FSBLK"-"E3FSBLK")",
                             input->inode_table, itend - 1, start,
                             metaend - 1);
        else
                err = 0;
        brelse(bh);

        return err;
}
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
                                  ext4_fsblk_t blk)
{
        struct buffer_head *bh;
        int err;

        bh = sb_getblk(sb, blk);
        if (!bh)
                return ERR_PTR(-EIO);
        if ((err = ext4_journal_get_write_access(handle, bh))) {
                brelse(bh);
                bh = ERR_PTR(err);
        } else {
                lock_buffer(bh);
                memset(bh->b_data, 0, sb->s_blocksize);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
        }

        return bh;
}
/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
        int i;

        if (start_bit >= end_bit)
                return;

        ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
        for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
                ext4_set_bit(i, bitmap);
        if (i < end_bit)
                memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
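
/*
 * Illustrative note (not in the original source): a worked example of
 * mark_bitmap_end() above, assuming a 1 KB block, i.e. 8192 bits per bitmap.
 * With start_bit = 1001 and end_bit = 8192, bits 1001..1007 are set one at a
 * time with ext4_set_bit() to reach the next byte boundary (1008), and the
 * remaining (8192 - 1008) / 8 = 898 bytes are covered by a single
 * memset(bitmap + 126, 0xff, 898).  The numbers are chosen only to show the
 * arithmetic.
 */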
/*
 * Set up the block and inode bitmaps, and the inode table for the new group.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 */
static int setup_new_group_blocks(struct super_block *sb,
                                  struct ext4_new_group_data *input)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t start = ext4_group_first_block_no(sb, input->group);
        int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
                le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
        unsigned long gdblocks = ext4_bg_num_gdb(sb, input->group);
        struct buffer_head *bh;
        handle_t *handle;
        ext4_fsblk_t block;
        ext4_grpblk_t bit;
        int i;
        int err = 0, err2;

        handle = ext4_journal_start_sb(sb, reserved_gdb + gdblocks +
                                       2 + sbi->s_itb_per_group);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        lock_super(sb);
        if (input->group != sbi->s_groups_count) {
                err = -EBUSY;
                goto exit_journal;
        }

        if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
                err = PTR_ERR(bh);
                goto exit_journal;
        }

        if (ext4_bg_has_super(sb, input->group)) {
                ext4_debug("mark backup superblock %#04lx (+0)\n", start);
                ext4_set_bit(0, bh->b_data);
        }

        /* Copy all of the GDT blocks into the backup in this group */
        for (i = 0, bit = 1, block = start + 1;
             i < gdblocks; i++, block++, bit++) {
                struct buffer_head *gdb;

                ext4_debug("update backup group %#04lx (+%d)\n", block, bit);

                gdb = sb_getblk(sb, block);
                if (!gdb) {
                        err = -EIO;
                        goto exit_bh;
                }
                if ((err = ext4_journal_get_write_access(handle, gdb))) {
                        brelse(gdb);
                        goto exit_bh;
                }
                lock_buffer(gdb);
                memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
                set_buffer_uptodate(gdb);
                unlock_buffer(gdb);
                ext4_journal_dirty_metadata(handle, gdb);
                ext4_set_bit(bit, bh->b_data);
                brelse(gdb);
        }

        /* Zero out all of the reserved backup group descriptor table blocks */
        for (i = 0, bit = gdblocks + 1, block = start + bit;
             i < reserved_gdb; i++, block++, bit++) {
                struct buffer_head *gdb;

                ext4_debug("clear reserved block %#04lx (+%d)\n", block, bit);

                if (IS_ERR(gdb = bclean(handle, sb, block))) {
                        err = PTR_ERR(gdb);
                        goto exit_bh;
                }
                ext4_journal_dirty_metadata(handle, gdb);
                ext4_set_bit(bit, bh->b_data);
                brelse(gdb);
        }
        ext4_debug("mark block bitmap %#04x (+%ld)\n", input->block_bitmap,
                   input->block_bitmap - start);
        ext4_set_bit(input->block_bitmap - start, bh->b_data);
        ext4_debug("mark inode bitmap %#04x (+%ld)\n", input->inode_bitmap,
                   input->inode_bitmap - start);
        ext4_set_bit(input->inode_bitmap - start, bh->b_data);

        /* Zero out all of the inode table blocks */
        for (i = 0, block = input->inode_table, bit = block - start;
             i < sbi->s_itb_per_group; i++, bit++, block++) {
                struct buffer_head *it;

                ext4_debug("clear inode block %#04lx (+%d)\n", block, bit);
                if (IS_ERR(it = bclean(handle, sb, block))) {
                        err = PTR_ERR(it);
                        goto exit_bh;
                }
                ext4_journal_dirty_metadata(handle, it);
                brelse(it);
                ext4_set_bit(bit, bh->b_data);
        }
        mark_bitmap_end(input->blocks_count, EXT4_BLOCKS_PER_GROUP(sb),
                        bh->b_data);
        ext4_journal_dirty_metadata(handle, bh);
        brelse(bh);

        /* Mark unused entries in inode bitmap used */
        ext4_debug("clear inode bitmap %#04x (+%ld)\n",
                   input->inode_bitmap, input->inode_bitmap - start);
        if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
                err = PTR_ERR(bh);
                goto exit_journal;
        }

        mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
                        bh->b_data);
        ext4_journal_dirty_metadata(handle, bh);
exit_bh:
        brelse(bh);

exit_journal:
        unlock_super(sb);
        if ((err2 = ext4_journal_stop(handle)) && !err)
                err = err2;

        return err;
}
/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
                                  unsigned *five, unsigned *seven)
{
        unsigned *min = three;
        int mult = 3;
        unsigned ret;

        if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                        EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
                ret = *min;
                *min += 1;
                return ret;
        }

        if (*five < *min) {
                min = five;
                mult = 5;
        }
        if (*seven < *min) {
                min = seven;
                mult = 7;
        }

        ret = *min;
        *min *= mult;

        return ret;
}
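
/*
 * Illustrative note (not in the original source): callers drive
 * ext4_list_backups() with a loop like the one in update_backups() below,
 *
 *        unsigned three = 1, five = 5, seven = 7, group;
 *
 *        while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last)
 *                ... operate on backup group "group" ...
 *
 * which, on a sparse-super filesystem with enough groups, visits groups
 * 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, ... in increasing order.
 */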
/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in current filesystem that have BACKUPS, or -ve error code.
 */
static int verify_reserved_gdb(struct super_block *sb,
                               struct buffer_head *primary)
{
        const ext4_fsblk_t blk = primary->b_blocknr;
        const unsigned long end = EXT4_SB(sb)->s_groups_count;
        unsigned three = 1;
        unsigned five = 5;
        unsigned seven = 7;
        unsigned grp;
        __le32 *p = (__le32 *)primary->b_data;
        int gdbackups = 0;

        while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
                if (le32_to_cpu(*p++) != grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
                        ext4_warning(sb, __FUNCTION__,
                                     "reserved GDT "E3FSBLK
                                     " missing grp %d ("E3FSBLK")",
                                     blk, grp,
                                     grp * EXT4_BLOCKS_PER_GROUP(sb) + blk);
                        return -EINVAL;
                }
                if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
                        return -EFBIG;
        }

        return gdbackups;
}
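
/*
 * Illustrative note (not in the original source): the check above expects
 * each reserved primary GDT block to list its own backups in group order.
 * For example, with 32768 blocks per group, a reserved GDT block at (made-up)
 * block blk = 130 must contain 1*32768 + 130, 3*32768 + 130, 5*32768 + 130,
 * ... in consecutive __le32 slots, one entry per backup group.
 */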
/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
                       struct ext4_new_group_data *input,
                       struct buffer_head **primary)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        unsigned long gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
        ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
        struct buffer_head **o_group_desc, **n_group_desc;
        struct buffer_head *dind;
        int gdbackups;
        struct ext4_iloc iloc;
        __le32 *data;
        int err;

        if (test_opt(sb, DEBUG))
                printk(KERN_DEBUG
                       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
                       gdb_num);

        /*
         * If we are not using the primary superblock/GDT copy don't resize,
         * because the user tools have no way of handling this.  Probably a
         * bad time to do it anyways.
         */
        if (EXT4_SB(sb)->s_sbh->b_blocknr !=
            le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
                ext4_warning(sb, __FUNCTION__,
                             "won't resize using backup superblock at %llu",
                             (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
                return -EPERM;
        }

        *primary = sb_bread(sb, gdblock);
        if (!*primary)
                return -EIO;

        if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
                err = gdbackups;
                goto exit_bh;
        }

        data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
        dind = sb_bread(sb, le32_to_cpu(*data));
        if (!dind) {
                err = -EIO;
                goto exit_bh;
        }

        data = (__le32 *)dind->b_data;
        if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
                ext4_warning(sb, __FUNCTION__,
                             "new group %u GDT block "E3FSBLK" not reserved",
                             input->group, gdblock);
                err = -EINVAL;
                goto exit_dind;
        }

        if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh)))
                goto exit_dind;

        if ((err = ext4_journal_get_write_access(handle, *primary)))
                goto exit_sbh;

        if ((err = ext4_journal_get_write_access(handle, dind)))
                goto exit_primary;

        /* ext4_reserve_inode_write() gets a reference on the iloc */
        if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
                goto exit_dindj;

        n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
                               GFP_KERNEL);
        if (!n_group_desc) {
                err = -ENOMEM;
                ext4_warning(sb, __FUNCTION__,
                             "not enough memory for %lu groups", gdb_num + 1);
                goto exit_inode;
        }

        /*
         * Finally, we have all of the possible failures behind us...
         *
         * Remove new GDT block from inode double-indirect block and clear out
         * the new GDT block for use (which also "frees" the backup GDT blocks
         * from the reserved inode).  We don't need to change the bitmaps for
         * these blocks, because they are marked as in-use from being in the
         * reserved inode, and will become GDT blocks (primary and backup).
         */
        data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
        ext4_journal_dirty_metadata(handle, dind);
        brelse(dind);
        inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
        ext4_mark_iloc_dirty(handle, inode, &iloc);
        memset((*primary)->b_data, 0, sb->s_blocksize);
        ext4_journal_dirty_metadata(handle, *primary);

        o_group_desc = EXT4_SB(sb)->s_group_desc;
        memcpy(n_group_desc, o_group_desc,
               EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
        n_group_desc[gdb_num] = *primary;
        EXT4_SB(sb)->s_group_desc = n_group_desc;
        EXT4_SB(sb)->s_gdb_count++;
        kfree(o_group_desc);

        es->s_reserved_gdt_blocks =
                cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
        ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);

        return 0;

exit_inode:
        //ext4_journal_release_buffer(handle, iloc.bh);
        brelse(iloc.bh);
exit_dindj:
        //ext4_journal_release_buffer(handle, dind);
exit_primary:
        //ext4_journal_release_buffer(handle, *primary);
exit_sbh:
        //ext4_journal_release_buffer(handle, *primary);
exit_dind:
        brelse(dind);
exit_bh:
        brelse(*primary);

        ext4_debug("leaving with error %d\n", err);
        return err;
}
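
/*
 * Illustrative note (not in the original source): a worked example of the
 * block arithmetic in add_new_gdb() above, assuming 4 KB blocks and 32-byte
 * group descriptors (so EXT4_DESC_PER_BLOCK() is 128 and the primary
 * superblock buffer sits at block 0).  Adding group 256 gives
 * gdb_num = 256 / 128 = 2 and gdblock = 0 + 1 + 2 = 3, so slot
 * gdb_num % EXT4_ADDR_PER_BLOCK() == 2 of the resize inode's DIND block must
 * already point at block 3, or the resize is refused with -EINVAL.
 */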
/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
                              struct ext4_new_group_data *input)
{
        struct super_block *sb = inode->i_sb;
        int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
        struct buffer_head **primary;
        struct buffer_head *dind;
        struct ext4_iloc iloc;
        ext4_fsblk_t blk;
        __le32 *data, *end;
        int gdbackups = 0;
        int res, i;
        int err;

        primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_KERNEL);
        if (!primary)
                return -ENOMEM;

        data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
        dind = sb_bread(sb, le32_to_cpu(*data));
        if (!dind) {
                err = -EIO;
                goto exit_free;
        }

        blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
        data = (__le32 *)dind->b_data + EXT4_SB(sb)->s_gdb_count;
        end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

        /* Get each reserved primary GDT block and verify it holds backups */
        for (res = 0; res < reserved_gdb; res++, blk++) {
                if (le32_to_cpu(*data) != blk) {
                        ext4_warning(sb, __FUNCTION__,
                                     "reserved block "E3FSBLK
                                     " not at offset %ld",
                                     blk,
                                     (long)(data - (__le32 *)dind->b_data));
                        err = -EINVAL;
                        goto exit_bh;
                }
                primary[res] = sb_bread(sb, blk);
                if (!primary[res]) {
                        err = -EIO;
                        goto exit_bh;
                }
                if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
                        brelse(primary[res]);
                        err = gdbackups;
                        goto exit_bh;
                }
                if (++data >= end)
                        data = (__le32 *)dind->b_data;
        }

        for (i = 0; i < reserved_gdb; i++) {
                if ((err = ext4_journal_get_write_access(handle, primary[i]))) {
                        /*
                        int j;
                        for (j = 0; j < i; j++)
                                ext4_journal_release_buffer(handle, primary[j]);
                         */
                        goto exit_bh;
                }
        }

        if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
                goto exit_bh;

        /*
         * Finally we can add each of the reserved backup GDT blocks from
         * the new group to its reserved primary GDT block.
         */
        blk = input->group * EXT4_BLOCKS_PER_GROUP(sb);
        for (i = 0; i < reserved_gdb; i++) {
                int err2;
                data = (__le32 *)primary[i]->b_data;
                /* printk("reserving backup %lu[%u] = %lu\n",
                       primary[i]->b_blocknr, gdbackups,
                       blk + primary[i]->b_blocknr); */
                data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
                err2 = ext4_journal_dirty_metadata(handle, primary[i]);
                if (!err)
                        err = err2;
        }
        inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
        ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
        while (--res >= 0)
                brelse(primary[res]);
        brelse(dind);

exit_free:
        kfree(primary);

        return err;
}
/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need lock_super() for this, because these blocks are not
 * otherwise touched by the filesystem code when it is mounted.  We don't
 * need to worry about last changing from sbi->s_groups_count, because the
 * worst that can happen is that we do not copy the full number of backups
 * at this time.  The resize which changed s_groups_count will backup again.
 */
static void update_backups(struct super_block *sb,
                           int blk_off, char *data, int size)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        const unsigned long last = sbi->s_groups_count;
        const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
        unsigned three = 1;
        unsigned five = 5;
        unsigned seven = 7;
        unsigned group;
        int rest = sb->s_blocksize - size;
        handle_t *handle;
        int err = 0, err2;

        handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
        if (IS_ERR(handle)) {
                group = 1;
                err = PTR_ERR(handle);
                goto exit_err;
        }

        while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) {
                struct buffer_head *bh;

                /* Out of journal space, and can't get more - abort - so sad */
                if (handle->h_buffer_credits == 0 &&
                    ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
                    (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
                        break;

                bh = sb_getblk(sb, group * bpg + blk_off);
                if (!bh) {
                        err = -EIO;
                        break;
                }
                ext4_debug("update metadata backup %#04lx\n",
                           (unsigned long)bh->b_blocknr);
                if ((err = ext4_journal_get_write_access(handle, bh)))
                        break;
                lock_buffer(bh);
                memcpy(bh->b_data, data, size);
                if (rest)
                        memset(bh->b_data + size, 0, rest);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
                ext4_journal_dirty_metadata(handle, bh);
                brelse(bh);
        }
        if ((err2 = ext4_journal_stop(handle)) && !err)
                err = err2;

        /*
         * Ugh! Need to have e2fsck write the backup copies.  It is too
         * late to revert the resize, we shouldn't fail just because of
         * the backup copies (they are only needed in case of corruption).
         *
         * However, if we got here we have a journal problem too, so we
         * can't really start a transaction to mark the superblock.
         * Chicken out and just set the flag on the hope it will be written
         * to disk, and if not - we will simply wait until next fsck.
         */
exit_err:
        if (err) {
                ext4_warning(sb, __FUNCTION__,
                             "can't update backup for group %d (err %d), "
                             "forcing fsck on next reboot", group, err);
                sbi->s_mount_state &= ~EXT4_VALID_FS;
                sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
                mark_buffer_dirty(sbi->s_sbh);
        }
}
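
/*
 * Illustrative note (not in the original source): update_backups() writes
 * "data" to block group * bpg + blk_off for every backup group.  As a made-up
 * example, with 32768 blocks per group and blk_off = 1 (a 1 KB-block layout
 * where the primary superblock lives in block 1), the superblock copy for
 * backup group 3 goes to block 3 * 32768 + 1 = 98305.
 */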
/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
                le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
        struct buffer_head *primary = NULL;
        struct ext4_group_desc *gdp;
        struct inode *inode = NULL;
        handle_t *handle;
        int gdb_off, gdb_num;
        int err, err2;

        gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
        gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

        if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                        EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
                ext4_warning(sb, __FUNCTION__,
                             "Can't resize non-sparse filesystem further");
                return -EPERM;
        }

        if (le32_to_cpu(es->s_blocks_count) + input->blocks_count <
            le32_to_cpu(es->s_blocks_count)) {
                ext4_warning(sb, __FUNCTION__, "blocks_count overflow\n");
                return -EINVAL;
        }

        if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
            le32_to_cpu(es->s_inodes_count)) {
                ext4_warning(sb, __FUNCTION__, "inodes_count overflow\n");
                return -EINVAL;
        }

        if (reserved_gdb || gdb_off == 0) {
                if (!EXT4_HAS_COMPAT_FEATURE(sb,
                                             EXT4_FEATURE_COMPAT_RESIZE_INODE)) {
                        ext4_warning(sb, __FUNCTION__,
                                     "No reserved GDT blocks, can't resize");
                        return -EPERM;
                }
                inode = iget(sb, EXT4_RESIZE_INO);
                if (!inode || is_bad_inode(inode)) {
                        ext4_warning(sb, __FUNCTION__,
                                     "Error opening resize inode");
                        iput(inode);
                        return -ENOENT;
                }
        }

        if ((err = verify_group_input(sb, input)))
                goto exit_put;

        if ((err = setup_new_group_blocks(sb, input)))
                goto exit_put;

        /*
         * We will always be modifying at least the superblock and a GDT
         * block.  If we are adding a group past the last current GDT block,
         * we will also modify the inode and the dindirect block.  If we
         * are adding a group with superblock/GDT backups we will also
         * modify each of the reserved GDT dindirect blocks.
         */
        handle = ext4_journal_start_sb(sb,
                                       ext4_bg_has_super(sb, input->group) ?
                                       3 + reserved_gdb : 4);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                goto exit_put;
        }

        lock_super(sb);
        if (input->group != sbi->s_groups_count) {
                ext4_warning(sb, __FUNCTION__,
                             "multiple resizers run on filesystem!");
                err = -EBUSY;
                goto exit_journal;
        }

        if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
                goto exit_journal;

        /*
         * We will only either add reserved group blocks to a backup group
         * or remove reserved blocks for the first group in a new group block.
         * Doing both would mean more complex code, and sane people don't
         * use non-sparse filesystems anymore.  This is already checked above.
         */
        if (gdb_off) {
                primary = sbi->s_group_desc[gdb_num];
                if ((err = ext4_journal_get_write_access(handle, primary)))
                        goto exit_journal;

                if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
                    (err = reserve_backup_gdb(handle, inode, input)))
                        goto exit_journal;
        } else if ((err = add_new_gdb(handle, inode, input, &primary)))
                goto exit_journal;

        /*
         * OK, now we've set up the new group.  Time to make it active.
         *
         * Current kernels don't lock all allocations via lock_super(),
         * so we have to be safe wrt. concurrent accesses to the group
         * data.  So we need to be careful to set all of the relevant
         * group descriptor data etc. *before* we enable the group.
         *
         * The key field here is sbi->s_groups_count: as long as
         * that retains its old value, nobody is going to access the new
         * group.
         *
         * So first we update all the descriptor metadata for the new
         * group; then we update the total disk blocks count; then we
         * update the groups count to enable the group; then finally we
         * update the free space counts so that the system can start
         * using the new disk blocks.
         */

        /* Update group descriptor block for new group */
        gdp = (struct ext4_group_desc *)primary->b_data + gdb_off;

        gdp->bg_block_bitmap = cpu_to_le32(input->block_bitmap);
        gdp->bg_inode_bitmap = cpu_to_le32(input->inode_bitmap);
        gdp->bg_inode_table = cpu_to_le32(input->inode_table);
        gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
        gdp->bg_free_inodes_count = cpu_to_le16(EXT4_INODES_PER_GROUP(sb));

        /*
         * Make the new blocks and inodes valid next.  We do this before
         * increasing the group count so that once the group is enabled,
         * all of its blocks and inodes are already valid.
         *
         * We always allocate group-by-group, then block-by-block or
         * inode-by-inode within a group, so enabling these
         * blocks/inodes before the group is live won't actually let us
         * allocate the new space yet.
         */
        es->s_blocks_count = cpu_to_le32(le32_to_cpu(es->s_blocks_count) +
                                         input->blocks_count);
        es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
                                         EXT4_INODES_PER_GROUP(sb));

        /*
         * We need to protect s_groups_count against other CPUs seeing
         * inconsistent state in the superblock.
         *
         * The precise rules we use are:
         *
         * * Writers of s_groups_count *must* hold lock_super
         * AND
         * * Writers must perform a smp_wmb() after updating all dependent
         *   data and before modifying the groups count
         *
         * * Readers must hold lock_super() over the access
         * OR
         * * Readers must perform an smp_rmb() after reading the groups count
         *   and before reading any dependent data.
         *
         * NB. These rules can be relaxed when checking the group count
         * while freeing data, as we can only allocate from a block
         * group after serialising against the group count, and we can
         * only then free after serialising in turn against that
         * allocation.
         */
        smp_wmb();

        /* Update the global fs size fields */
        sbi->s_groups_count++;

        ext4_journal_dirty_metadata(handle, primary);

        /* Update the reserved block counts only once the new group is
         * active. */
        es->s_r_blocks_count = cpu_to_le32(le32_to_cpu(es->s_r_blocks_count) +
                                           input->reserved_blocks);

        /* Update the free space counts */
        percpu_counter_mod(&sbi->s_freeblocks_counter,
                           input->free_blocks_count);
        percpu_counter_mod(&sbi->s_freeinodes_counter,
                           EXT4_INODES_PER_GROUP(sb));

        ext4_journal_dirty_metadata(handle, sbi->s_sbh);
        sb->s_dirt = 1;

exit_journal:
        unlock_super(sb);
        if ((err2 = ext4_journal_stop(handle)) && !err)
                err = err2;
        if (!err) {
                update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
                               sizeof(struct ext4_super_block));
                update_backups(sb, primary->b_blocknr, primary->b_data,
                               primary->b_size);
        }
exit_put:
        iput(inode);
        return err;
} /* ext4_group_add */
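
/*
 * Illustrative note (not in the original source): ext4_group_add() is not
 * called directly by userspace; in kernels of this vintage it is reached via
 * the EXT4_IOC_GROUP_ADD ioctl (see fs/ext4/ioctl.c), which copies in a
 * struct ext4_new_group_input describing the new group's bitmaps, inode
 * table, block count and reserved blocks.  In practice the ioctl is driven
 * by resize2fs/ext2online rather than issued by hand.
 */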
/* Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * to allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
                      ext4_fsblk_t n_blocks_count)
{
        ext4_fsblk_t o_blocks_count;
        unsigned long o_groups_count;
        ext4_grpblk_t last;
        ext4_grpblk_t add;
        struct buffer_head *bh;
        handle_t *handle;
        int err;
        unsigned long freed_blocks;

        /* We don't need to worry about locking wrt other resizers just
         * yet: we're going to revalidate es->s_blocks_count after
         * taking lock_super() below. */
        o_blocks_count = le32_to_cpu(es->s_blocks_count);
        o_groups_count = EXT4_SB(sb)->s_groups_count;

        if (test_opt(sb, DEBUG))
                printk(KERN_DEBUG "EXT4-fs: extending last group from "E3FSBLK
                       " up to "E3FSBLK" blocks\n",
                       o_blocks_count, n_blocks_count);

        if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
                return 0;

        if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
                printk(KERN_ERR "EXT4-fs: filesystem on %s:"
                       " too large to resize to "E3FSBLK" blocks safely\n",
                       sb->s_id, n_blocks_count);
                if (sizeof(sector_t) < 8)
                        ext4_warning(sb, __FUNCTION__,
                                     "CONFIG_LBD not enabled\n");
                return -EINVAL;
        }

        if (n_blocks_count < o_blocks_count) {
                ext4_warning(sb, __FUNCTION__,
                             "can't shrink FS - resize aborted");
                return -EBUSY;
        }

        /* Handle the remaining blocks in the last group only. */
        ext4_get_group_no_and_offset(sb, o_blocks_count, NULL, &last);

        if (last == 0) {
                ext4_warning(sb, __FUNCTION__,
                             "need to use ext2online to resize further");
                return -EPERM;
        }

        add = EXT4_BLOCKS_PER_GROUP(sb) - last;

        if (o_blocks_count + add < o_blocks_count) {
                ext4_warning(sb, __FUNCTION__, "blocks_count overflow");
                return -EINVAL;
        }

        if (o_blocks_count + add > n_blocks_count)
                add = n_blocks_count - o_blocks_count;

        if (o_blocks_count + add < n_blocks_count)
                ext4_warning(sb, __FUNCTION__,
                             "will only finish group ("E3FSBLK
                             " blocks, %u new)",
                             o_blocks_count + add, add);

        /* See if the device is actually as big as what was requested */
        bh = sb_bread(sb, o_blocks_count + add - 1);
        if (!bh) {
                ext4_warning(sb, __FUNCTION__,
                             "can't read last block, resize aborted");
                return -ENOSPC;
        }
        brelse(bh);

        /* We will update the superblock, one block bitmap, and
         * one group descriptor via ext4_free_blocks().
         */
        handle = ext4_journal_start_sb(sb, 3);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                ext4_warning(sb, __FUNCTION__, "error %d on journal start", err);
                goto exit_put;
        }

        lock_super(sb);
        if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
                ext4_warning(sb, __FUNCTION__,
                             "multiple resizers run on filesystem!");
                unlock_super(sb);
                err = -EBUSY;
                goto exit_put;
        }

        if ((err = ext4_journal_get_write_access(handle,
                                                 EXT4_SB(sb)->s_sbh))) {
                ext4_warning(sb, __FUNCTION__,
                             "error %d on journal write access", err);
                unlock_super(sb);
                ext4_journal_stop(handle);
                goto exit_put;
        }
        es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
        ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
        sb->s_dirt = 1;
        unlock_super(sb);
        ext4_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count,
                   o_blocks_count + add);
        ext4_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
        ext4_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n", o_blocks_count,
                   o_blocks_count + add);
        if ((err = ext4_journal_stop(handle)))
                goto exit_put;
        if (test_opt(sb, DEBUG))
                printk(KERN_DEBUG "EXT4-fs: extended group to %u blocks\n",
                       le32_to_cpu(es->s_blocks_count));
        update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es,
                       sizeof(struct ext4_super_block));
exit_put:
        return err;
} /* ext4_group_extend */
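
/*
 * Illustrative note (not in the original source): ext4_group_extend() is the
 * handler behind the EXT4_IOC_GROUP_EXTEND ioctl (and the "resize=" remount
 * option mentioned above); the caller passes the desired total block count.
 * As a made-up example of the arithmetic add = EXT4_BLOCKS_PER_GROUP(sb) -
 * last: a filesystem of 1000000 blocks whose last group still has 8192
 * unused blocks can be grown to at most 1008192 blocks here, after which a
 * full ext4_group_add() is needed.
 */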