/*
 * linux/fs/ext3/resize.c
 *
 * Support for resizing an ext3 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */

#include <linux/config.h>

#define EXT3FS_DEBUG

#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/ext3_jbd.h>

#include <linux/errno.h>
#include <linux/slab.h>

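/*
 * Range-check helpers used below: "outside" and "inside" treat
 * [first, last) as a half-open interval of block numbers.
 */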
#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

static int verify_group_input(struct super_block *sb,
			      struct ext3_new_group_data *input)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	struct ext3_super_block *es = sbi->s_es;
	unsigned start = le32_to_cpu(es->s_blocks_count);
	unsigned end = start + input->blocks_count;
	unsigned group = input->group;
	unsigned itend = input->inode_table + EXT3_SB(sb)->s_itb_per_group;
	unsigned overhead = ext3_bg_has_super(sb, group) ?
		(1 + ext3_bg_num_gdb(sb, group) +
		 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
	unsigned metaend = start + overhead;
	struct buffer_head *bh = NULL;
	int free_blocks_count;
	int err = -EINVAL;

	input->free_blocks_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT3-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext3_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	if (group != sbi->s_groups_count)
		ext3_warning(sb, __FUNCTION__,
			     "Cannot add at group %u (only %lu groups)",
			     input->group, sbi->s_groups_count);
	else if ((start - le32_to_cpu(es->s_first_data_block)) %
		 EXT3_BLOCKS_PER_GROUP(sb))
		ext3_warning(sb, __FUNCTION__, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext3_warning(sb, __FUNCTION__, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext3_warning(sb, __FUNCTION__, "Bad blocks count %u",
			     input->blocks_count);
	else if (!(bh = sb_bread(sb, end - 1)))
		ext3_warning(sb, __FUNCTION__, "Cannot read last block (%u)",
			     end - 1);
	else if (outside(input->block_bitmap, start, end))
		ext3_warning(sb, __FUNCTION__,
			     "Block bitmap not in group (block %u)",
			     input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext3_warning(sb, __FUNCTION__,
			     "Inode bitmap not in group (block %u)",
			     input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext3_warning(sb, __FUNCTION__,
			     "Inode table not in group (blocks %u-%u)",
			     input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext3_warning(sb, __FUNCTION__,
			     "Block bitmap same as inode bitmap (%u)",
			     input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext3_warning(sb, __FUNCTION__,
			     "Block bitmap (%u) in inode table (%u-%u)",
			     input->block_bitmap, input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext3_warning(sb, __FUNCTION__,
			     "Inode bitmap (%u) in inode table (%u-%u)",
			     input->inode_bitmap, input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext3_warning(sb, __FUNCTION__,
			     "Block bitmap (%u) in GDT table (%u-%u)",
			     input->block_bitmap, start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext3_warning(sb, __FUNCTION__,
			     "Inode bitmap (%u) in GDT table (%u-%u)",
			     input->inode_bitmap, start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext3_warning(sb, __FUNCTION__,
			     "Inode table (%u-%u) overlaps GDT table (%u-%u)",
			     input->inode_table, itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

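/*
 * Return a zeroed buffer for block blk, already joined to the transaction
 * via ext3_journal_get_write_access(); if write access cannot be obtained,
 * the journal error is returned as an ERR_PTR().
 */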
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  unsigned long blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if ((err = ext3_journal_get_write_access(handle, bh))) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		lock_buffer(bh);
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
	}

	return bh;
}

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
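/*
 * For example, with start_bit = 5 and end_bit = 32, the loop below sets
 * bits 5..7 individually with ext3_set_bit(), and the memset() then fills
 * the remaining three whole bytes, covering bits 8..31.
 */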
static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext3_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext3_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new group.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 */
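/*
 * The journal credits requested below (reserved_gdb + gdblocks + 2 +
 * s_itb_per_group) cover one buffer for each backup GDT block, each
 * reserved GDT block, the block and inode bitmaps, and each inode table
 * block of the new group.
 */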
static int setup_new_group_blocks(struct super_block *sb,
				  struct ext3_new_group_data *input)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	unsigned long start = input->group * sbi->s_blocks_per_group +
		le32_to_cpu(sbi->s_es->s_first_data_block);
	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
		le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
	unsigned long gdblocks = ext3_bg_num_gdb(sb, input->group);
	struct buffer_head *bh;
	handle_t *handle;
	unsigned long block;
	int bit;
	int i;
	int err = 0, err2;

	handle = ext3_journal_start_sb(sb, reserved_gdb + gdblocks +
				       2 + sbi->s_itb_per_group);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	lock_super(sb);
	if (input->group != sbi->s_groups_count) {
		err = -EBUSY;
		goto exit_journal;
	}

	if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
		err = PTR_ERR(bh);
		goto exit_journal;
	}

	if (ext3_bg_has_super(sb, input->group)) {
		ext3_debug("mark backup superblock %#04lx (+0)\n", start);
		ext3_set_bit(0, bh->b_data);
	}

	/* Copy all of the GDT blocks into the backup in this group */
	for (i = 0, bit = 1, block = start + 1;
	     i < gdblocks; i++, block++, bit++) {
		struct buffer_head *gdb;

		ext3_debug("update backup group %#04lx (+%d)\n", block, bit);

		gdb = sb_getblk(sb, block);
		if ((err = ext3_journal_get_write_access(handle, gdb))) {
			brelse(gdb);
			goto exit_bh;
		}
		lock_buffer(gdb);
		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
		set_buffer_uptodate(gdb);
		unlock_buffer(gdb);
		ext3_journal_dirty_metadata(handle, gdb);
		ext3_set_bit(bit, bh->b_data);
		brelse(gdb);
	}

	/* Zero out all of the reserved backup group descriptor table blocks */
	for (i = 0, bit = gdblocks + 1, block = start + bit;
	     i < reserved_gdb; i++, block++, bit++) {
		struct buffer_head *gdb;

		ext3_debug("clear reserved block %#04lx (+%d)\n", block, bit);

		if (IS_ERR(gdb = bclean(handle, sb, block))) {
			err = PTR_ERR(gdb);
			goto exit_bh;
		}
		ext3_journal_dirty_metadata(handle, gdb);
		ext3_set_bit(bit, bh->b_data);
		brelse(gdb);
	}
	ext3_debug("mark block bitmap %#04x (+%ld)\n", input->block_bitmap,
		   input->block_bitmap - start);
	ext3_set_bit(input->block_bitmap - start, bh->b_data);
	ext3_debug("mark inode bitmap %#04x (+%ld)\n", input->inode_bitmap,
		   input->inode_bitmap - start);
	ext3_set_bit(input->inode_bitmap - start, bh->b_data);

	/* Zero out all of the inode table blocks */
	for (i = 0, block = input->inode_table, bit = block - start;
	     i < sbi->s_itb_per_group; i++, bit++, block++) {
		struct buffer_head *it;

		ext3_debug("clear inode block %#04x (+%ld)\n", block, bit);
		if (IS_ERR(it = bclean(handle, sb, block))) {
			err = PTR_ERR(it);
			goto exit_bh;
		}
		ext3_journal_dirty_metadata(handle, it);
		brelse(it);
		ext3_set_bit(bit, bh->b_data);
	}
	mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
			bh->b_data);
	ext3_journal_dirty_metadata(handle, bh);
	brelse(bh);

	/* Mark unused entries in the inode bitmap as in use */
	ext3_debug("clear inode bitmap %#04x (+%ld)\n",
		   input->inode_bitmap, input->inode_bitmap - start);
	if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
		err = PTR_ERR(bh);
		goto exit_journal;
	}

	mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
			bh->b_data);
	ext3_journal_dirty_metadata(handle, bh);
exit_bh:
	brelse(bh);

exit_journal:
	unlock_super(sb);
	if ((err2 = ext3_journal_stop(handle)) && !err)
		err = err2;

	return err;
}

/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext3 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
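/*
 * With SPARSE_SUPER set, each call returns the smallest of the three
 * counters and then multiplies that counter by its base (3, 5, or 7), so
 * successive calls walk the group numbers above in increasing order.
 * Without SPARSE_SUPER, the first counter is simply incremented, so every
 * group is returned in turn.
 */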
static unsigned ext3_list_backups(struct super_block *sb, unsigned *three,
				  unsigned *five, unsigned *seven)
{
	unsigned *min = three;
	int mult = 3;
	unsigned ret;

	if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}

/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in the current filesystem that have BACKUPS, or -ve error code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       struct buffer_head *primary)
{
	const unsigned long blk = primary->b_blocknr;
	const unsigned long end = EXT3_SB(sb)->s_groups_count;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__u32 *p = (__u32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk) {
			ext3_warning(sb, __FUNCTION__,
				     "reserved GDT %ld missing grp %d (%ld)\n",
				     blk, grp,
				     grp * EXT3_BLOCKS_PER_GROUP(sb) + blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT3_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
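/*
 * Layout, as used below: the resize inode's double-indirect block holds the
 * block numbers of the reserved primary GDT blocks, indexed by GDT block
 * number.  The entry for the new GDT block is cleared once that block is
 * brought into use as a real group descriptor block.
 */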
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       struct ext3_new_group_data *input,
		       struct buffer_head **primary)
{
	struct super_block *sb = inode->i_sb;
	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
	unsigned long gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
	unsigned long gdblock = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc;
	struct buffer_head *dind;
	int gdbackups;
	struct ext3_iloc iloc;
	__u32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT3-fs: ext3_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	/*
	 * If we are not using the primary superblock/GDT copy, don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyway.
	 */
	if (EXT3_SB(sb)->s_sbh->b_blocknr !=
	    le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) {
		ext3_warning(sb, __FUNCTION__,
			     "won't resize using backup superblock at %llu\n",
			     (unsigned long long)EXT3_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	*primary = sb_bread(sb, gdblock);
	if (!*primary)
		return -EIO;

	if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
		err = gdbackups;
		goto exit_bh;
	}

	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
	dind = sb_bread(sb, le32_to_cpu(*data));
	if (!dind) {
		err = -EIO;
		goto exit_bh;
	}

	data = (__u32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext3_warning(sb, __FUNCTION__,
			     "new group %u GDT block %lu not reserved\n",
			     input->group, gdblock);
		err = -EINVAL;
		goto exit_dind;
	}

	if ((err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh)))
		goto exit_dind;

	if ((err = ext3_journal_get_write_access(handle, *primary)))
		goto exit_sbh;

	if ((err = ext3_journal_get_write_access(handle, dind)))
		goto exit_primary;

	/* ext3_reserve_inode_write() gets a reference on the iloc */
	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
		goto exit_dindj;

	n_group_desc = (struct buffer_head **)kmalloc((gdb_num + 1) *
			sizeof(struct buffer_head *), GFP_KERNEL);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext3_warning(sb, __FUNCTION__,
			     "not enough memory for %lu groups", gdb_num + 1);
		goto exit_inode;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove the new GDT block from the inode's double-indirect block and
	 * clear out the new GDT block for use (which also "frees" the backup
	 * GDT blocks from the reserved inode).  We don't need to change the
	 * bitmaps for these blocks, because they are marked as in-use from
	 * being in the reserved inode, and will become GDT blocks (primary
	 * and backup).
	 */
	data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
	ext3_journal_dirty_metadata(handle, dind);
	brelse(dind);
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
	ext3_mark_iloc_dirty(handle, inode, &iloc);
	memset((*primary)->b_data, 0, sb->s_blocksize);
	ext3_journal_dirty_metadata(handle, *primary);

	o_group_desc = EXT3_SB(sb)->s_group_desc;
	memcpy(n_group_desc, o_group_desc,
	       EXT3_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	n_group_desc[gdb_num] = *primary;
	EXT3_SB(sb)->s_group_desc = n_group_desc;
	EXT3_SB(sb)->s_gdb_count++;
	kfree(o_group_desc);

	es->s_reserved_gdt_blocks =
		cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);

	return 0;

exit_inode:
	//ext3_journal_release_buffer(handle, iloc.bh);
	brelse(iloc.bh);
exit_dindj:
	//ext3_journal_release_buffer(handle, dind);
exit_primary:
	//ext3_journal_release_buffer(handle, *primary);
exit_sbh:
	//ext3_journal_release_buffer(handle, EXT3_SB(sb)->s_sbh);
exit_dind:
	brelse(dind);
exit_bh:
	brelse(*primary);

	ext3_debug("leaving with error %d\n", err);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
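/*
 * Note that verify_reserved_gdb() returns the number of backups already
 * recorded in each reserved primary GDT block, so the new group's backup
 * block number is stored at index "gdbackups" in each primary block below.
 */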
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      struct ext3_new_group_data *input)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT3_SB(sb)->s_es->s_reserved_gdt_blocks);
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext3_iloc iloc;
	unsigned long blk;
	__u32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_KERNEL);
	if (!primary)
		return -ENOMEM;

	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
	dind = sb_bread(sb, le32_to_cpu(*data));
	if (!dind) {
		err = -EIO;
		goto exit_free;
	}

	blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count;
	data = (__u32 *)dind->b_data + EXT3_SB(sb)->s_gdb_count;
	end = (__u32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext3_warning(sb, __FUNCTION__,
				     "reserved block %lu not at offset %ld\n",
				     blk, (long)(data - (__u32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = sb_bread(sb, blk);
		if (!primary[res]) {
			err = -EIO;
			goto exit_bh;
		}
		if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__u32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		if ((err = ext3_journal_get_write_access(handle, primary[i]))) {
			/*
			int j;
			for (j = 0; j < i; j++)
				ext3_journal_release_buffer(handle, primary[j]);
			 */
			goto exit_bh;
		}
	}

	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = input->group * EXT3_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__u32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext3_journal_dirty_metadata(handle, primary[i]);
		if (!err)
			err = err2;
	}
	inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
	ext3_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}

/*
 * Update the backup copies of the ext3 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need lock_super() for this, because these blocks are not
 * otherwise touched by the filesystem code when it is mounted.  We don't
 * need to worry about last changing from sbi->s_groups_count, because the
 * worst that can happen is that we do not copy the full number of backups
 * at this time.  The resize which changed s_groups_count will backup again.
 */
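/*
 * In this file update_backups() is called with the superblock buffer (from
 * both ext3_group_add() and ext3_group_extend()) and with the new group
 * descriptor block (from ext3_group_add()).  blk_off is the primary copy's
 * block number; the backup for group N is written at N * blocks_per_group +
 * blk_off, and data/size describe the primary copy being replicated.
 */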
static void update_backups(struct super_block *sb,
			   int blk_off, char *data, int size)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	const unsigned long last = sbi->s_groups_count;
	const int bpg = EXT3_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned group;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext3_journal_start_sb(sb, EXT3_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	while ((group = ext3_list_backups(sb, &three, &five, &seven)) < last) {
		struct buffer_head *bh;

		/* Out of journal space, and can't get more - abort - so sad */
		if (handle->h_buffer_credits == 0 &&
		    ext3_journal_extend(handle, EXT3_MAX_TRANS_DATA) &&
		    (err = ext3_journal_restart(handle, EXT3_MAX_TRANS_DATA)))
			break;

		bh = sb_getblk(sb, group * bpg + blk_off);
		ext3_debug("update metadata backup %#04lx\n",
			   (unsigned long)bh->b_blocknr);
		if ((err = ext3_journal_get_write_access(handle, bh)))
			break;
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		ext3_journal_dirty_metadata(handle, bh);
		brelse(bh);
	}
	if ((err2 = ext3_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext3_warning(sb, __FUNCTION__,
			     "can't update backup for group %d (err %d), "
			     "forcing fsck on next reboot\n", group, err);
		sbi->s_mount_state &= ~EXT3_VALID_FS;
		sbi->s_es->s_state &= ~cpu_to_le16(EXT3_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
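/*
 * Overall flow: verify_group_input() and setup_new_group_blocks() prepare
 * the new group's bitmaps and inode table outside the main transaction;
 * then, under lock_super(), the group descriptor is filled in, the
 * superblock block/inode counts are raised, s_groups_count is bumped
 * (after smp_wmb()), and finally the backup superblocks and GDT copies are
 * refreshed via update_backups().
 */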
int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	struct ext3_super_block *es = sbi->s_es;
	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct buffer_head *primary = NULL;
	struct ext3_group_desc *gdp;
	struct inode *inode = NULL;
	handle_t *handle;
	int gdb_off, gdb_num;
	int err, err2;

	gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
	gdb_off = input->group % EXT3_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !EXT3_HAS_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
		ext3_warning(sb, __FUNCTION__,
			     "Can't resize non-sparse filesystem further\n");
		return -EPERM;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!EXT3_HAS_COMPAT_FEATURE(sb,
					     EXT3_FEATURE_COMPAT_RESIZE_INODE)) {
			ext3_warning(sb, __FUNCTION__,
				     "No reserved GDT blocks, can't resize\n");
			return -EPERM;
		}
		inode = iget(sb, EXT3_RESIZE_INO);
		if (!inode || is_bad_inode(inode)) {
			ext3_warning(sb, __FUNCTION__,
				     "Error opening resize inode\n");
			iput(inode);
			return -ENOENT;
		}
	}

	if ((err = verify_group_input(sb, input)))
		goto exit_put;

	if ((err = setup_new_group_blocks(sb, input)))
		goto exit_put;

	/*
	 * We will always be modifying at least the superblock and a GDT
	 * block.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	handle = ext3_journal_start_sb(sb,
				       ext3_bg_has_super(sb, input->group) ?
				       3 + reserved_gdb : 4);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit_put;
	}

	lock_super(sb);
	if (input->group != EXT3_SB(sb)->s_groups_count) {
		ext3_warning(sb, __FUNCTION__,
			     "multiple resizers run on filesystem!\n");
		err = -EBUSY;
		goto exit_journal;
	}

	if ((err = ext3_journal_get_write_access(handle, sbi->s_sbh)))
		goto exit_journal;

	/*
	 * We will only either add reserved group blocks to a backup group
	 * or remove reserved blocks for the first group in a new group block.
	 * Doing both would mean more complex code, and sane people don't
	 * use non-sparse filesystems anymore.  This is already checked above.
	 */
	if (gdb_off) {
		primary = sbi->s_group_desc[gdb_num];
		if ((err = ext3_journal_get_write_access(handle, primary)))
			goto exit_journal;

		if (reserved_gdb && ext3_bg_num_gdb(sb, input->group) &&
		    (err = reserve_backup_gdb(handle, inode, input)))
			goto exit_journal;
	} else if ((err = add_new_gdb(handle, inode, input, &primary)))
		goto exit_journal;

	/*
	 * OK, now we've set up the new group.  Time to make it active.
	 *
	 * Current kernels don't lock all allocations via lock_super(),
	 * so we have to be safe wrt. concurrent accesses to the group
	 * data.  So we need to be careful to set all of the relevant
	 * group descriptor data etc. *before* we enable the group.
	 *
	 * The key field here is EXT3_SB(sb)->s_groups_count: as long as
	 * that retains its old value, nobody is going to access the new
	 * group.
	 *
	 * So first we update all the descriptor metadata for the new
	 * group; then we update the total disk blocks count; then we
	 * update the groups count to enable the group; then finally we
	 * update the free space counts so that the system can start
	 * using the new disk blocks.
	 */

	/* Update group descriptor block for new group */
	gdp = (struct ext3_group_desc *)primary->b_data + gdb_off;

	gdp->bg_block_bitmap = cpu_to_le32(input->block_bitmap);
	gdp->bg_inode_bitmap = cpu_to_le32(input->inode_bitmap);
	gdp->bg_inode_table = cpu_to_le32(input->inode_table);
	gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
	gdp->bg_free_inodes_count = cpu_to_le16(EXT3_INODES_PER_GROUP(sb));

	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	es->s_blocks_count = cpu_to_le32(le32_to_cpu(es->s_blocks_count) +
					 input->blocks_count);
	es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
					 EXT3_INODES_PER_GROUP(sb));

	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers of s_groups_count *must* hold lock_super()
	 * AND
	 * * Writers must perform a smp_wmb() after updating all dependent
	 *   data and before modifying the groups count
	 *
	 * * Readers must hold lock_super() over the access
	 * OR
	 * * Readers must perform an smp_rmb() after reading the groups count
	 *   and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	EXT3_SB(sb)->s_groups_count++;

	ext3_journal_dirty_metadata(handle, primary);

	/* Update the reserved block counts only once the new group is
	 * active. */
	es->s_r_blocks_count = cpu_to_le32(le32_to_cpu(es->s_r_blocks_count) +
					   input->reserved_blocks);

	/* Update the free space counts */
	percpu_counter_mod(&sbi->s_freeblocks_counter,
			   input->free_blocks_count);
	percpu_counter_mod(&sbi->s_freeinodes_counter,
			   EXT3_INODES_PER_GROUP(sb));

	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
	sb->s_dirt = 1;

exit_journal:
	unlock_super(sb);
	if ((err2 = ext3_journal_stop(handle)) && !err)
		err = err2;
	if (!err) {
		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
			       sizeof(struct ext3_super_block));
		update_backups(sb, primary->b_blocknr, primary->b_data,
			       primary->b_size);
	}
exit_put:
	iput(inode);
	return err;
} /* ext3_group_add */

/* Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext3_group_add()
 * to allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
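/*
 * The newly added blocks are handed to ext3_free_blocks_sb() after
 * s_blocks_count has been raised, so they appear as free space in the
 * (already existing) last group; no new group descriptor is needed.
 */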
int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
		      unsigned long n_blocks_count)
{
	unsigned long o_blocks_count;
	unsigned long o_groups_count;
	unsigned long last;
	int add;
	struct buffer_head *bh;
	handle_t *handle;
	int err, freed_blocks;

	/* We don't need to worry about locking wrt other resizers just
	 * yet: we're going to revalidate es->s_blocks_count after
	 * taking lock_super() below. */
	o_blocks_count = le32_to_cpu(es->s_blocks_count);
	o_groups_count = EXT3_SB(sb)->s_groups_count;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT3-fs: extending last group from %lu to %lu blocks\n",
		       o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	if (n_blocks_count < o_blocks_count) {
		ext3_warning(sb, __FUNCTION__,
			     "can't shrink FS - resize aborted");
		return -EBUSY;
	}

	/* Handle the remaining blocks in the last group only. */
	last = (o_blocks_count - le32_to_cpu(es->s_first_data_block)) %
		EXT3_BLOCKS_PER_GROUP(sb);

	if (last == 0) {
		ext3_warning(sb, __FUNCTION__,
			     "need to use ext2online to resize further\n");
		return -EPERM;
	}

	add = EXT3_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext3_warning(sb, __FUNCTION__,
			     "will only finish group (%lu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, o_blocks_count + add - 1);
	if (!bh) {
		ext3_warning(sb, __FUNCTION__,
			     "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext3_free_blocks().
	 */
	handle = ext3_journal_start_sb(sb, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext3_warning(sb, __FUNCTION__, "error %d on journal start", err);
		goto exit_put;
	}

	lock_super(sb);
	if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
		ext3_warning(sb, __FUNCTION__,
			     "multiple resizers run on filesystem!\n");
		err = -EBUSY;
		unlock_super(sb);
		ext3_journal_stop(handle);
		goto exit_put;
	}

	if ((err = ext3_journal_get_write_access(handle,
						 EXT3_SB(sb)->s_sbh))) {
		ext3_warning(sb, __FUNCTION__,
			     "error %d on journal write access", err);
		unlock_super(sb);
		ext3_journal_stop(handle);
		goto exit_put;
	}
	es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
	sb->s_dirt = 1;
	unlock_super(sb);
	ext3_debug("freeing blocks %ld through %ld\n", o_blocks_count,
		   o_blocks_count + add);
	ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
	ext3_debug("freed blocks %ld through %ld\n", o_blocks_count,
		   o_blocks_count + add);
	if ((err = ext3_journal_stop(handle)))
		goto exit_put;
	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT3-fs: extended group to %u blocks\n",
		       le32_to_cpu(es->s_blocks_count));
	update_backups(sb, EXT3_SB(sb)->s_sbh->b_blocknr, (char *)es,
		       sizeof(struct ext3_super_block));
exit_put:
	return err;
} /* ext3_group_extend */