recovery.c

/*
 * recovery.c - NILFS recovery logic
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */

#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "sufile.h"
#include "page.h"
#include "seglist.h"
#include "segbuf.h"

/*
 * Segment check result
 */
enum {
        NILFS_SEG_VALID,
        NILFS_SEG_NO_SUPER_ROOT,
        NILFS_SEG_FAIL_IO,
        NILFS_SEG_FAIL_MAGIC,
        NILFS_SEG_FAIL_SEQ,
        NILFS_SEG_FAIL_CHECKSUM_SEGSUM,
        NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
        NILFS_SEG_FAIL_CHECKSUM_FULL,
        NILFS_SEG_FAIL_CONSISTENCY,
};

/* work structure for recovery */
struct nilfs_recovery_block {
        ino_t ino;              /* Inode number of the file that this block
                                   belongs to */
        sector_t blocknr;       /* block number */
        __u64 vblocknr;         /* virtual block number */
        unsigned long blkoff;   /* File offset of the data block (per block) */
        struct list_head list;
};
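
/*
 * nilfs_warn_segment_error - print a warning that describes the given
 * segment check result and convert it to an error code: -EIO for an I/O
 * failure, -EINVAL for every other failure.
 */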
static int nilfs_warn_segment_error(int err)
{
        switch (err) {
        case NILFS_SEG_FAIL_IO:
                printk(KERN_WARNING
                       "NILFS warning: I/O error on loading last segment\n");
                return -EIO;
        case NILFS_SEG_FAIL_MAGIC:
                printk(KERN_WARNING
                       "NILFS warning: Segment magic number invalid\n");
                break;
        case NILFS_SEG_FAIL_SEQ:
                printk(KERN_WARNING
                       "NILFS warning: Sequence number mismatch\n");
                break;
        case NILFS_SEG_FAIL_CHECKSUM_SEGSUM:
                printk(KERN_WARNING
                       "NILFS warning: Checksum error in segment summary\n");
                break;
        case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
                printk(KERN_WARNING
                       "NILFS warning: Checksum error in super root\n");
                break;
        case NILFS_SEG_FAIL_CHECKSUM_FULL:
                printk(KERN_WARNING
                       "NILFS warning: Checksum error in segment payload\n");
                break;
        case NILFS_SEG_FAIL_CONSISTENCY:
                printk(KERN_WARNING
                       "NILFS warning: Inconsistent segment\n");
                break;
        case NILFS_SEG_NO_SUPER_ROOT:
                printk(KERN_WARNING
                       "NILFS warning: No super root in the last segment\n");
                break;
        case NILFS_SEG_VALID:
        default:
                BUG();
        }
        return -EINVAL;
}
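
/*
 * store_segsum_info - unpack an on-disk segment summary into the in-memory
 * nilfs_segsum_info, deriving the number of summary blocks and file blocks
 * from the block size.
 */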
static void store_segsum_info(struct nilfs_segsum_info *ssi,
                              struct nilfs_segment_summary *sum,
                              unsigned int blocksize)
{
        ssi->flags = le16_to_cpu(sum->ss_flags);
        ssi->seg_seq = le64_to_cpu(sum->ss_seq);
        ssi->ctime = le64_to_cpu(sum->ss_create);
        ssi->next = le64_to_cpu(sum->ss_next);
        ssi->nblocks = le32_to_cpu(sum->ss_nblocks);
        ssi->nfinfo = le32_to_cpu(sum->ss_nfinfo);
        ssi->sumbytes = le32_to_cpu(sum->ss_sumbytes);

        ssi->nsumblk = DIV_ROUND_UP(ssi->sumbytes, blocksize);
        ssi->nfileblk = ssi->nblocks - ssi->nsumblk - !!NILFS_SEG_HAS_SR(ssi);
}

/**
 * calc_crc_cont - check CRC of blocks continuously
 * @sbi: nilfs_sb_info
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 */
static int calc_crc_cont(struct nilfs_sb_info *sbi, struct buffer_head *bhs,
                         u32 *sum, unsigned long offset, u64 check_bytes,
                         sector_t start, unsigned long nblock)
{
        unsigned long blocksize = sbi->s_super->s_blocksize;
        unsigned long size;
        u32 crc;

        BUG_ON(offset >= blocksize);
        check_bytes -= offset;
        size = min_t(u64, check_bytes, blocksize - offset);
        crc = crc32_le(sbi->s_nilfs->ns_crc_seed,
                       (unsigned char *)bhs->b_data + offset, size);
        if (--nblock > 0) {
                do {
                        struct buffer_head *bh
                                = sb_bread(sbi->s_super, ++start);
                        if (!bh)
                                return -EIO;
                        check_bytes -= size;
                        size = min_t(u64, check_bytes, blocksize);
                        crc = crc32_le(crc, bh->b_data, size);
                        brelse(bh);
                } while (--nblock > 0);
        }
        *sum = crc;
        return 0;
}

/**
 * nilfs_read_super_root_block - read super root block
 * @sb: super_block
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 */
int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
                                struct buffer_head **pbh, int check)
{
        struct buffer_head *bh_sr;
        struct nilfs_super_root *sr;
        u32 crc;
        int ret;

        *pbh = NULL;
        bh_sr = sb_bread(sb, sr_block);
        if (unlikely(!bh_sr)) {
                ret = NILFS_SEG_FAIL_IO;
                goto failed;
        }

        sr = (struct nilfs_super_root *)bh_sr->b_data;
        if (check) {
                unsigned bytes = le16_to_cpu(sr->sr_bytes);

                if (bytes == 0 || bytes > sb->s_blocksize) {
                        ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
                        goto failed_bh;
                }
                if (calc_crc_cont(NILFS_SB(sb), bh_sr, &crc,
                                  sizeof(sr->sr_sum), bytes, sr_block, 1)) {
                        ret = NILFS_SEG_FAIL_IO;
                        goto failed_bh;
                }
                if (crc != le32_to_cpu(sr->sr_sum)) {
                        ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
                        goto failed_bh;
                }
        }
        *pbh = bh_sr;
        return 0;

 failed_bh:
        brelse(bh_sr);

 failed:
        return nilfs_warn_segment_error(ret);
}

/**
 * load_segment_summary - read segment summary of the specified partial segment
 * @sbi: nilfs_sb_info
 * @pseg_start: start disk block number of partial segment
 * @seg_seq: sequence number requested
 * @ssi: pointer to nilfs_segsum_info struct to store information
 * @full_check: full check flag
 *              (0: only checks segment summary CRC, 1: data CRC)
 */
static int
load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
                     u64 seg_seq, struct nilfs_segsum_info *ssi,
                     int full_check)
{
        struct buffer_head *bh_sum;
        struct nilfs_segment_summary *sum;
        unsigned long offset, nblock;
        u64 check_bytes;
        u32 crc, crc_sum;
        int ret = NILFS_SEG_FAIL_IO;

        bh_sum = sb_bread(sbi->s_super, pseg_start);
        if (!bh_sum)
                goto out;

        sum = (struct nilfs_segment_summary *)bh_sum->b_data;

        /* Check consistency of segment summary */
        if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC) {
                ret = NILFS_SEG_FAIL_MAGIC;
                goto failed;
        }
        store_segsum_info(ssi, sum, sbi->s_super->s_blocksize);
        if (seg_seq != ssi->seg_seq) {
                ret = NILFS_SEG_FAIL_SEQ;
                goto failed;
        }

        if (full_check) {
                offset = sizeof(sum->ss_datasum);
                check_bytes =
                        ((u64)ssi->nblocks << sbi->s_super->s_blocksize_bits);
                nblock = ssi->nblocks;
                crc_sum = le32_to_cpu(sum->ss_datasum);
                ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
        } else { /* only checks segment summary */
                offset = sizeof(sum->ss_datasum) + sizeof(sum->ss_sumsum);
                check_bytes = ssi->sumbytes;
                nblock = ssi->nsumblk;
                crc_sum = le32_to_cpu(sum->ss_sumsum);
                ret = NILFS_SEG_FAIL_CHECKSUM_SEGSUM;
        }

        if (unlikely(nblock == 0 ||
                     nblock > sbi->s_nilfs->ns_blocks_per_segment)) {
                /* This limits the number of blocks read in the CRC check */
                ret = NILFS_SEG_FAIL_CONSISTENCY;
                goto failed;
        }
        if (calc_crc_cont(sbi, bh_sum, &crc, offset, check_bytes,
                          pseg_start, nblock)) {
                ret = NILFS_SEG_FAIL_IO;
                goto failed;
        }
        if (crc == crc_sum)
                ret = 0;
 failed:
        brelse(bh_sum);
 out:
        return ret;
}
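
/*
 * segsum_get - return a pointer to the next item of @bytes bytes in the
 * segment summary, reading the next summary block when the current one is
 * exhausted.  Returns NULL if the read fails.
 */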
static void *segsum_get(struct super_block *sb, struct buffer_head **pbh,
                        unsigned int *offset, unsigned int bytes)
{
        void *ptr;
        sector_t blocknr;

        BUG_ON((*pbh)->b_size < *offset);
        if (bytes > (*pbh)->b_size - *offset) {
                blocknr = (*pbh)->b_blocknr;
                brelse(*pbh);
                *pbh = sb_bread(sb, blocknr + 1);
                if (unlikely(!*pbh))
                        return NULL;
                *offset = 0;
        }
        ptr = (*pbh)->b_data + *offset;
        *offset += bytes;
        return ptr;
}
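
/*
 * segsum_skip - skip @count items of @bytes bytes each in the segment
 * summary, advancing the buffer and offset to the block holding the next
 * item.
 */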
static void segsum_skip(struct super_block *sb, struct buffer_head **pbh,
                        unsigned int *offset, unsigned int bytes,
                        unsigned long count)
{
        unsigned int rest_item_in_current_block
                = ((*pbh)->b_size - *offset) / bytes;

        if (count <= rest_item_in_current_block) {
                *offset += bytes * count;
        } else {
                sector_t blocknr = (*pbh)->b_blocknr;
                unsigned int nitem_per_block = (*pbh)->b_size / bytes;
                unsigned int bcnt;

                count -= rest_item_in_current_block;
                bcnt = DIV_ROUND_UP(count, nitem_per_block);
                *offset = bytes * (count - (bcnt - 1) * nitem_per_block);

                brelse(*pbh);
                *pbh = sb_bread(sb, blocknr + bcnt);
        }
}
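
/*
 * collect_blocks_from_segsum - walk the finfo/binfo entries of a partial
 * segment and queue a nilfs_recovery_block on @head for each data block
 * to be salvaged.
 */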
static int
collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr,
                           struct nilfs_segsum_info *ssi,
                           struct list_head *head)
{
        struct buffer_head *bh;
        unsigned int offset;
        unsigned long nfinfo = ssi->nfinfo;
        sector_t blocknr = sum_blocknr + ssi->nsumblk;
        ino_t ino;
        int err = -EIO;

        if (!nfinfo)
                return 0;

        bh = sb_bread(sbi->s_super, sum_blocknr);
        if (unlikely(!bh))
                goto out;

        offset = le16_to_cpu(
                ((struct nilfs_segment_summary *)bh->b_data)->ss_bytes);
        for (;;) {
                unsigned long nblocks, ndatablk, nnodeblk;
                struct nilfs_finfo *finfo;

                finfo = segsum_get(sbi->s_super, &bh, &offset, sizeof(*finfo));
                if (unlikely(!finfo))
                        goto out;

                ino = le64_to_cpu(finfo->fi_ino);
                nblocks = le32_to_cpu(finfo->fi_nblocks);
                ndatablk = le32_to_cpu(finfo->fi_ndatablk);
                nnodeblk = nblocks - ndatablk;

                while (ndatablk-- > 0) {
                        struct nilfs_recovery_block *rb;
                        struct nilfs_binfo_v *binfo;

                        binfo = segsum_get(sbi->s_super, &bh, &offset,
                                           sizeof(*binfo));
                        if (unlikely(!binfo))
                                goto out;

                        rb = kmalloc(sizeof(*rb), GFP_NOFS);
                        if (unlikely(!rb)) {
                                err = -ENOMEM;
                                goto out;
                        }
                        rb->ino = ino;
                        rb->blocknr = blocknr++;
                        rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
                        rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
                        /* INIT_LIST_HEAD(&rb->list); */
                        list_add_tail(&rb->list, head);
                }
                if (--nfinfo == 0)
                        break;
                blocknr += nnodeblk; /* always 0 for the data sync segments */
                segsum_skip(sbi->s_super, &bh, &offset, sizeof(__le64),
                            nnodeblk);
                if (unlikely(!bh))
                        goto out;
        }
        err = 0;
 out:
        brelse(bh); /* brelse(NULL) is just ignored */
        return err;
}
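
/* dispose_recovery_list - free all recovery blocks queued on @head */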
static void dispose_recovery_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct nilfs_recovery_block *rb
                        = list_entry(head->next,
                                     struct nilfs_recovery_block, list);
                list_del(&rb->list);
                kfree(rb);
        }
}

void nilfs_dispose_segment_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct nilfs_segment_entry *ent
                        = list_entry(head->next,
                                     struct nilfs_segment_entry, list);
                list_del(&ent->list);
                nilfs_free_segment_entry(ent);
        }
}
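
/*
 * nilfs_prepare_segment_for_recovery - update segment usage information
 * and allocate a fresh segment so that the blocks salvaged by roll-forward
 * can be written out by the following segment construction.
 */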
static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
                                              struct nilfs_recovery_info *ri)
{
        struct list_head *head = &ri->ri_used_segments;
        struct nilfs_segment_entry *ent, *n;
        struct inode *sufile = nilfs->ns_sufile;
        __u64 segnum[4];
        int err;
        int i;

        segnum[0] = nilfs->ns_segnum;
        segnum[1] = nilfs->ns_nextnum;
        segnum[2] = ri->ri_segnum;
        segnum[3] = ri->ri_nextnum;

        /*
         * Releasing the next segment of the latest super root.
         * The next segment is invalidated by this recovery.
         */
        err = nilfs_sufile_free(sufile, segnum[1]);
        if (unlikely(err))
                goto failed;

        err = -ENOMEM;
        for (i = 1; i < 4; i++) {
                ent = nilfs_alloc_segment_entry(segnum[i]);
                if (unlikely(!ent))
                        goto failed;
                list_add_tail(&ent->list, head);
        }

        /*
         * Collecting segments written after the latest super root.
         * These are marked volatile active, and won't be reallocated in
         * the next construction.
         */
        list_for_each_entry_safe(ent, n, head, list) {
                if (ent->segnum == segnum[0]) {
                        list_del(&ent->list);
                        nilfs_free_segment_entry(ent);
                        continue;
                }
                err = nilfs_open_segment_entry(ent, sufile);
                if (unlikely(err))
                        goto failed;
                if (nilfs_segment_usage_clean(ent->raw_su)) {
                        nilfs_segment_usage_set_volatile_active(ent->raw_su);
                        /* Keep it open */
                } else {
                        /* Removing duplicated entries */
                        list_del(&ent->list);
                        nilfs_close_segment_entry(ent, sufile);
                        nilfs_free_segment_entry(ent);
                }
        }
        list_splice_init(head, nilfs->ns_used_segments.prev);

        /*
         * The segment having the latest super root is active, and
         * should be deactivated on the next construction for recovery.
         */
        err = -ENOMEM;
        ent = nilfs_alloc_segment_entry(segnum[0]);
        if (unlikely(!ent))
                goto failed;
        list_add_tail(&ent->list, &ri->ri_used_segments);

        /* Allocate new segments for recovery */
        err = nilfs_sufile_alloc(sufile, &segnum[0]);
        if (unlikely(err))
                goto failed;

        nilfs->ns_pseg_offset = 0;
        nilfs->ns_seg_seq = ri->ri_seq + 2;
        nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];
        return 0;

 failed:
        /* No need to recover sufile because it will be destroyed on error */
        return err;
}
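
/*
 * nilfs_recovery_copy_block - read the original data block from disk and
 * copy its contents into the page prepared for the file being salvaged.
 */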
static int nilfs_recovery_copy_block(struct nilfs_sb_info *sbi,
                                     struct nilfs_recovery_block *rb,
                                     struct page *page)
{
        struct buffer_head *bh_org;
        void *kaddr;

        bh_org = sb_bread(sbi->s_super, rb->blocknr);
        if (unlikely(!bh_org))
                return -EIO;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
        kunmap_atomic(kaddr, KM_USER0);
        brelse(bh_org);
        return 0;
}
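
/*
 * recover_dsync_blocks - write the queued recovery blocks back to their
 * files through the page cache, marking the files dirty and counting the
 * salvaged blocks in @nr_salvaged_blocks.
 */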
static int recover_dsync_blocks(struct nilfs_sb_info *sbi,
                                struct list_head *head,
                                unsigned long *nr_salvaged_blocks)
{
        struct inode *inode;
        struct nilfs_recovery_block *rb, *n;
        unsigned blocksize = sbi->s_super->s_blocksize;
        struct page *page;
        loff_t pos;
        int err = 0, err2 = 0;

        list_for_each_entry_safe(rb, n, head, list) {
                inode = nilfs_iget(sbi->s_super, rb->ino);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        inode = NULL;
                        goto failed_inode;
                }

                pos = rb->blkoff << inode->i_blkbits;
                page = NULL;
                err = block_write_begin(NULL, inode->i_mapping, pos, blocksize,
                                        0, &page, NULL, nilfs_get_block);
                if (unlikely(err))
                        goto failed_inode;

                err = nilfs_recovery_copy_block(sbi, rb, page);
                if (unlikely(err))
                        goto failed_page;

                err = nilfs_set_file_dirty(sbi, inode, 1);
                if (unlikely(err))
                        goto failed_page;

                block_write_end(NULL, inode->i_mapping, pos, blocksize,
                                blocksize, page, NULL);

                unlock_page(page);
                page_cache_release(page);

                (*nr_salvaged_blocks)++;
                goto next;

 failed_page:
                unlock_page(page);
                page_cache_release(page);

 failed_inode:
                printk(KERN_WARNING
                       "NILFS warning: error recovering data block "
                       "(err=%d, ino=%lu, block-offset=%llu)\n",
                       err, rb->ino, (unsigned long long)rb->blkoff);
                if (!err2)
                        err2 = err;
 next:
                iput(inode); /* iput(NULL) is just ignored */
                list_del_init(&rb->list);
                kfree(rb);
        }
        return err2;
}

/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
                                 struct nilfs_sb_info *sbi,
                                 struct nilfs_recovery_info *ri)
{
        struct nilfs_segsum_info ssi;
        sector_t pseg_start;
        sector_t seg_start, seg_end;  /* Starting/ending DBN of full segment */
        unsigned long nsalvaged_blocks = 0;
        u64 seg_seq;
        __u64 segnum, nextnum = 0;
        int empty_seg = 0;
        int err = 0, ret;
        LIST_HEAD(dsync_blocks);  /* list of data blocks to be recovered */
        enum {
                RF_INIT_ST,
                RF_DSYNC_ST,   /* scanning data-sync segments */
        };
        int state = RF_INIT_ST;

        nilfs_attach_writer(nilfs, sbi);
        pseg_start = ri->ri_lsegs_start;
        seg_seq = ri->ri_lsegs_start_seq;
        segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
        nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

        while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
                ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
                if (ret) {
                        if (ret == NILFS_SEG_FAIL_IO) {
                                err = -EIO;
                                goto failed;
                        }
                        goto strayed;
                }
                if (unlikely(NILFS_SEG_HAS_SR(&ssi)))
                        goto confused;

                /* Found a valid partial segment; do recovery actions */
                nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
                empty_seg = 0;
                nilfs->ns_ctime = ssi.ctime;
                if (!(ssi.flags & NILFS_SS_GC))
                        nilfs->ns_nongc_ctime = ssi.ctime;

                switch (state) {
                case RF_INIT_ST:
                        if (!NILFS_SEG_LOGBGN(&ssi) || !NILFS_SEG_DSYNC(&ssi))
                                goto try_next_pseg;
                        state = RF_DSYNC_ST;
                        /* Fall through */
                case RF_DSYNC_ST:
                        if (!NILFS_SEG_DSYNC(&ssi))
                                goto confused;

                        err = collect_blocks_from_segsum(
                                sbi, pseg_start, &ssi, &dsync_blocks);
                        if (unlikely(err))
                                goto failed;
                        if (NILFS_SEG_LOGEND(&ssi)) {
                                err = recover_dsync_blocks(
                                        sbi, &dsync_blocks, &nsalvaged_blocks);
                                if (unlikely(err))
                                        goto failed;
                                state = RF_INIT_ST;
                        }
                        break; /* Fall through to try_next_pseg */
                }

 try_next_pseg:
                if (pseg_start == ri->ri_lsegs_end)
                        break;
                pseg_start += ssi.nblocks;
                if (pseg_start < seg_end)
                        continue;
                goto feed_segment;

 strayed:
                if (pseg_start == ri->ri_lsegs_end)
                        break;

 feed_segment:
                /* Looking to the next full segment */
                if (empty_seg++)
                        break;
                seg_seq++;
                segnum = nextnum;
                nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
                pseg_start = seg_start;
        }

        if (nsalvaged_blocks) {
                printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
                       sbi->s_super->s_id, nsalvaged_blocks);
                ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
        }
 out:
        dispose_recovery_list(&dsync_blocks);
        nilfs_detach_writer(sbi->s_nilfs, sbi);
        return err;

 confused:
        err = -EINVAL;
 failed:
        printk(KERN_ERR
               "NILFS (device %s): Error roll-forwarding "
               "(err=%d, pseg block=%llu). ",
               sbi->s_super->s_id, err, (unsigned long long)pseg_start);
        goto out;
}
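
/*
 * nilfs_finish_roll_forward - zero out the first block of the salvaged
 * logs when they share a full segment with the latest super root,
 * invalidating those logs so they are not rolled forward again.
 */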
static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
                                      struct nilfs_sb_info *sbi,
                                      struct nilfs_recovery_info *ri)
{
        struct buffer_head *bh;
        int err;

        if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
            nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
                return;

        bh = sb_getblk(sbi->s_super, ri->ri_lsegs_start);
        BUG_ON(!bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_dirty(bh);
        err = sync_dirty_buffer(bh);
        if (unlikely(err))
                printk(KERN_WARNING
                       "NILFS warning: buffer sync write failed during "
                       "post-cleaning of recovery.\n");
        brelse(bh);
}

/**
 * nilfs_recover_logical_segments - salvage logical segments written after
 * the latest super root
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - Inconsistent filesystem state.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
                                   struct nilfs_sb_info *sbi,
                                   struct nilfs_recovery_info *ri)
{
        int err;

        if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
                return 0;

        err = nilfs_attach_checkpoint(sbi, ri->ri_cno);
        if (unlikely(err)) {
                printk(KERN_ERR
                       "NILFS: error loading the latest checkpoint.\n");
                return err;
        }

        err = nilfs_do_roll_forward(nilfs, sbi, ri);
        if (unlikely(err))
                goto failed;

        if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
                err = nilfs_prepare_segment_for_recovery(nilfs, ri);
                if (unlikely(err)) {
                        printk(KERN_ERR "NILFS: Error preparing segments for "
                               "recovery.\n");
                        goto failed;
                }

                err = nilfs_attach_segment_constructor(sbi, ri);
                if (unlikely(err))
                        goto failed;

                set_nilfs_discontinued(nilfs);
                err = nilfs_construct_segment(sbi->s_super);
                nilfs_detach_segment_constructor(sbi);

                if (unlikely(err)) {
                        printk(KERN_ERR "NILFS: Oops! recovery failed. "
                               "(err=%d)\n", err);
                        goto failed;
                }

                nilfs_finish_roll_forward(nilfs, sbi, ri);
        }

        nilfs_detach_checkpoint(sbi);
        return 0;

 failed:
        nilfs_detach_checkpoint(sbi);
        nilfs_mdt_clear(nilfs->ns_cpfile);
        nilfs_mdt_clear(nilfs->ns_sufile);
        nilfs_mdt_clear(nilfs->ns_dat);
        return err;
}

/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * nilfs_search_super_root() looks for the latest super-root from a partial
 * segment pointed by the superblock.  It sets up struct the_nilfs through
 * this search.  It fills nilfs_recovery_info (ri) required for recovery.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - No valid segment found
 *
 * %-EIO - I/O error
 */
int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
                            struct nilfs_recovery_info *ri)
{
        struct nilfs_segsum_info ssi;
        sector_t pseg_start, pseg_end, sr_pseg_start = 0;
        sector_t seg_start, seg_end; /* range of full segment (block number) */
        u64 seg_seq;
        __u64 segnum, nextnum = 0;
        __u64 cno;
        struct nilfs_segment_entry *ent;
        LIST_HEAD(segments);
        int empty_seg = 0, scan_newer = 0;
        int ret;

        pseg_start = nilfs->ns_last_pseg;
        seg_seq = nilfs->ns_last_seq;
        cno = nilfs->ns_last_cno;
        segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

        /* Calculate range of segment */
        nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

        for (;;) {
                /* Load segment summary */
                ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
                if (ret) {
                        if (ret == NILFS_SEG_FAIL_IO)
                                goto failed;
                        goto strayed;
                }
                pseg_end = pseg_start + ssi.nblocks - 1;
                if (unlikely(pseg_end > seg_end)) {
                        ret = NILFS_SEG_FAIL_CONSISTENCY;
                        goto strayed;
                }

                /* A valid partial segment */
                ri->ri_pseg_start = pseg_start;
                ri->ri_seq = seg_seq;
                ri->ri_segnum = segnum;
                nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
                ri->ri_nextnum = nextnum;
                empty_seg = 0;

                if (!NILFS_SEG_HAS_SR(&ssi)) {
                        if (!scan_newer) {
                                /* This will never happen because a superblock
                                   (last_segment) always points to a pseg
                                   having a super root. */
                                ret = NILFS_SEG_FAIL_CONSISTENCY;
                                goto failed;
                        }
                        if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) {
                                ri->ri_lsegs_start = pseg_start;
                                ri->ri_lsegs_start_seq = seg_seq;
                        }
                        if (NILFS_SEG_LOGEND(&ssi))
                                ri->ri_lsegs_end = pseg_start;
                        goto try_next_pseg;
                }

                /* A valid super root was found. */
                ri->ri_cno = cno++;
                ri->ri_super_root = pseg_end;
                ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

                nilfs_dispose_segment_list(&segments);
                nilfs->ns_pseg_offset = (sr_pseg_start = pseg_start)
                        + ssi.nblocks - seg_start;
                nilfs->ns_seg_seq = seg_seq;
                nilfs->ns_segnum = segnum;
                nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
                nilfs->ns_ctime = ssi.ctime;
                nilfs->ns_nextnum = nextnum;

                if (scan_newer)
                        ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
                else if (nilfs->ns_mount_state & NILFS_VALID_FS)
                        goto super_root_found;

                scan_newer = 1;

                /* reset region for roll-forward */
                pseg_start += ssi.nblocks;
                if (pseg_start < seg_end)
                        continue;
                goto feed_segment;

 try_next_pseg:
                /* Standing on a course, or met an inconsistent state */
                pseg_start += ssi.nblocks;
                if (pseg_start < seg_end)
                        continue;
                goto feed_segment;

 strayed:
                /* Off the trail */
                if (!scan_newer)
                        /*
                         * This can happen if a checkpoint was written without
                         * barriers, or as a result of an I/O failure.
                         */
                        goto failed;

 feed_segment:
                /* Looking to the next full segment */
                if (empty_seg++)
                        goto super_root_found; /* found a valid super root */

                ent = nilfs_alloc_segment_entry(segnum);
                if (unlikely(!ent)) {
                        ret = -ENOMEM;
                        goto failed;
                }
                list_add_tail(&ent->list, &segments);

                seg_seq++;
                segnum = nextnum;
                nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
                pseg_start = seg_start;
        }

 super_root_found:
        /* Updating pointers relating to the latest checkpoint */
        list_splice(&segments, ri->ri_used_segments.prev);
        nilfs->ns_last_pseg = sr_pseg_start;
        nilfs->ns_last_seq = nilfs->ns_seg_seq;
        nilfs->ns_last_cno = ri->ri_cno;
        return 0;

 failed:
        nilfs_dispose_segment_list(&segments);
        return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
}