recovery.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918
  1. /*
  2. * recovery.c - NILFS recovery logic
  3. *
  4. * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * Written by Ryusuke Konishi <ryusuke@osrg.net>
  21. */
  22. #include <linux/buffer_head.h>
  23. #include <linux/blkdev.h>
  24. #include <linux/swap.h>
  25. #include <linux/crc32.h>
  26. #include "nilfs.h"
  27. #include "segment.h"
  28. #include "sufile.h"
  29. #include "page.h"
  30. #include "segbuf.h"
/*
 * Segment check result
 */
enum {
	NILFS_SEG_VALID,			/* segment passed all checks */
	NILFS_SEG_NO_SUPER_ROOT,		/* valid log, but no super root block */
	NILFS_SEG_FAIL_IO,			/* read error while loading the segment */
	NILFS_SEG_FAIL_MAGIC,			/* segment summary magic mismatch */
	NILFS_SEG_FAIL_SEQ,			/* sequence number mismatch */
	NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,	/* super root CRC mismatch */
	NILFS_SEG_FAIL_CHECKSUM_FULL,		/* whole-segment payload CRC mismatch */
	NILFS_SEG_FAIL_CONSISTENCY,		/* inconsistent segment metadata */
};
/* work structure for recovery */
struct nilfs_recovery_block {
	ino_t ino;		/* Inode number of the file that this block
				   belongs to */
	sector_t blocknr;	/* block number */
	__u64 vblocknr;		/* virtual block number */
	unsigned long blkoff;	/* File offset of the data block (per block) */
	struct list_head list;	/* link on the per-log recovery list */
};
  53. static int nilfs_warn_segment_error(int err)
  54. {
  55. switch (err) {
  56. case NILFS_SEG_FAIL_IO:
  57. printk(KERN_WARNING
  58. "NILFS warning: I/O error on loading last segment\n");
  59. return -EIO;
  60. case NILFS_SEG_FAIL_MAGIC:
  61. printk(KERN_WARNING
  62. "NILFS warning: Segment magic number invalid\n");
  63. break;
  64. case NILFS_SEG_FAIL_SEQ:
  65. printk(KERN_WARNING
  66. "NILFS warning: Sequence number mismatch\n");
  67. break;
  68. case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
  69. printk(KERN_WARNING
  70. "NILFS warning: Checksum error in super root\n");
  71. break;
  72. case NILFS_SEG_FAIL_CHECKSUM_FULL:
  73. printk(KERN_WARNING
  74. "NILFS warning: Checksum error in segment payload\n");
  75. break;
  76. case NILFS_SEG_FAIL_CONSISTENCY:
  77. printk(KERN_WARNING
  78. "NILFS warning: Inconsistent segment\n");
  79. break;
  80. case NILFS_SEG_NO_SUPER_ROOT:
  81. printk(KERN_WARNING
  82. "NILFS warning: No super root in the last segment\n");
  83. break;
  84. }
  85. return -EINVAL;
  86. }
  87. static void store_segsum_info(struct nilfs_segsum_info *ssi,
  88. struct nilfs_segment_summary *sum,
  89. unsigned int blocksize)
  90. {
  91. ssi->flags = le16_to_cpu(sum->ss_flags);
  92. ssi->seg_seq = le64_to_cpu(sum->ss_seq);
  93. ssi->ctime = le64_to_cpu(sum->ss_create);
  94. ssi->next = le64_to_cpu(sum->ss_next);
  95. ssi->nblocks = le32_to_cpu(sum->ss_nblocks);
  96. ssi->nfinfo = le32_to_cpu(sum->ss_nfinfo);
  97. ssi->sumbytes = le32_to_cpu(sum->ss_sumbytes);
  98. ssi->nsumblk = DIV_ROUND_UP(ssi->sumbytes, blocksize);
  99. ssi->nfileblk = ssi->nblocks - ssi->nsumblk - !!NILFS_SEG_HAS_SR(ssi);
  100. }
/**
 * calc_crc_cont - check CRC of blocks continuously
 * @sbi: nilfs_sb_info
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 *
 * Computes a crc32 over @check_bytes bytes spanning @nblock consecutive
 * disk blocks, skipping the first @offset bytes of the first block (this
 * is how the checksum field itself is excluded).  Continuation blocks
 * are read synchronously with sb_bread().
 *
 * Returns 0 on success with the checksum stored in *@sum, or -EIO if a
 * continuation block could not be read.
 */
static int calc_crc_cont(struct nilfs_sb_info *sbi, struct buffer_head *bhs,
			 u32 *sum, unsigned long offset, u64 check_bytes,
			 sector_t start, unsigned long nblock)
{
	unsigned long blocksize = sbi->s_super->s_blocksize;
	unsigned long size;
	u32 crc;

	BUG_ON(offset >= blocksize);
	check_bytes -= offset;
	/* portion of the first block that takes part in the checksum */
	size = min_t(u64, check_bytes, blocksize - offset);
	crc = crc32_le(sbi->s_nilfs->ns_crc_seed,
		       (unsigned char *)bhs->b_data + offset, size);
	if (--nblock > 0) {
		do {
			struct buffer_head *bh
				= sb_bread(sbi->s_super, ++start);
			if (!bh)
				return -EIO;
			/* the last block may be covered only partially */
			check_bytes -= size;
			size = min_t(u64, check_bytes, blocksize);
			crc = crc32_le(crc, bh->b_data, size);
			brelse(bh);
		} while (--nblock > 0);
	}
	*sum = crc;
	return 0;
}
/**
 * nilfs_read_super_root_block - read super root block
 * @sb: super_block
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 *
 * On success, returns 0 and stores the buffer holding the super root in
 * *@pbh (the caller must brelse() it).  On failure, *@pbh is left NULL
 * and a negative errno from nilfs_warn_segment_error() is returned.
 */
int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
				struct buffer_head **pbh, int check)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *sr;
	u32 crc;
	int ret;

	*pbh = NULL;
	bh_sr = sb_bread(sb, sr_block);
	if (unlikely(!bh_sr)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}

	sr = (struct nilfs_super_root *)bh_sr->b_data;
	if (check) {
		/* sr_bytes must fit within one block; a bogus value would
		   make the CRC computation read past the super root */
		unsigned bytes = le16_to_cpu(sr->sr_bytes);

		if (bytes == 0 || bytes > sb->s_blocksize) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
		/* CRC covers the super root minus its own checksum field */
		if (calc_crc_cont(NILFS_SB(sb), bh_sr, &crc,
				  sizeof(sr->sr_sum), bytes, sr_block, 1)) {
			ret = NILFS_SEG_FAIL_IO;
			goto failed_bh;
		}
		if (crc != le32_to_cpu(sr->sr_sum)) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
	}
	*pbh = bh_sr;
	return 0;

 failed_bh:
	brelse(bh_sr);

 failed:
	return nilfs_warn_segment_error(ret);
}
/**
 * load_segment_summary - read segment summary of the specified partial segment
 * @sbi: nilfs_sb_info
 * @pseg_start: start disk block number of partial segment
 * @seg_seq: sequence number requested
 * @ssi: pointer to nilfs_segsum_info struct to store information
 *
 * Reads the summary header block, verifies the magic number, sequence
 * number, block count and the full-segment checksum, filling @ssi on
 * the way.
 *
 * Returns 0 when the partial segment is valid, otherwise one of the
 * NILFS_SEG_FAIL_* codes.
 */
static int
load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
		     u64 seg_seq, struct nilfs_segsum_info *ssi)
{
	struct buffer_head *bh_sum;
	struct nilfs_segment_summary *sum;
	unsigned long nblock;
	u32 crc;
	int ret = NILFS_SEG_FAIL_IO;

	bh_sum = sb_bread(sbi->s_super, pseg_start);
	if (!bh_sum)
		goto out;

	sum = (struct nilfs_segment_summary *)bh_sum->b_data;

	/* Check consistency of segment summary */
	if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC) {
		ret = NILFS_SEG_FAIL_MAGIC;
		goto failed;
	}
	store_segsum_info(ssi, sum, sbi->s_super->s_blocksize);
	if (seg_seq != ssi->seg_seq) {
		ret = NILFS_SEG_FAIL_SEQ;
		goto failed;
	}

	nblock = ssi->nblocks;
	if (unlikely(nblock == 0 ||
		     nblock > sbi->s_nilfs->ns_blocks_per_segment)) {
		/* This limits the number of blocks read in the CRC check */
		ret = NILFS_SEG_FAIL_CONSISTENCY;
		goto failed;
	}
	/* ss_datasum covers the whole partial segment except the checksum
	   field itself */
	if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum),
			  ((u64)nblock << sbi->s_super->s_blocksize_bits),
			  pseg_start, nblock)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}
	if (crc == le32_to_cpu(sum->ss_datasum))
		ret = 0;
	else
		ret = NILFS_SEG_FAIL_CHECKSUM_FULL;

 failed:
	brelse(bh_sum);

 out:
	return ret;
}
  234. static void *segsum_get(struct super_block *sb, struct buffer_head **pbh,
  235. unsigned int *offset, unsigned int bytes)
  236. {
  237. void *ptr;
  238. sector_t blocknr;
  239. BUG_ON((*pbh)->b_size < *offset);
  240. if (bytes > (*pbh)->b_size - *offset) {
  241. blocknr = (*pbh)->b_blocknr;
  242. brelse(*pbh);
  243. *pbh = sb_bread(sb, blocknr + 1);
  244. if (unlikely(!*pbh))
  245. return NULL;
  246. *offset = 0;
  247. }
  248. ptr = (*pbh)->b_data + *offset;
  249. *offset += bytes;
  250. return ptr;
  251. }
/*
 * segsum_skip - skip @count items of @bytes bytes each in the summary
 * area, advancing *@pbh / *@offset across block boundaries.
 *
 * Items never straddle a block boundary, so each block holds exactly
 * b_size / bytes items.  When the skip crosses into another block, the
 * target block is read with sb_bread(); on failure *@pbh becomes NULL
 * and callers must check for that.
 */
static void segsum_skip(struct super_block *sb, struct buffer_head **pbh,
			unsigned int *offset, unsigned int bytes,
			unsigned long count)
{
	unsigned int rest_item_in_current_block
		= ((*pbh)->b_size - *offset) / bytes;

	if (count <= rest_item_in_current_block) {
		/* stays within the current block */
		*offset += bytes * count;
	} else {
		sector_t blocknr = (*pbh)->b_blocknr;
		unsigned int nitem_per_block = (*pbh)->b_size / bytes;
		unsigned int bcnt;

		count -= rest_item_in_current_block;
		/* number of blocks to advance, and the offset of the item
		   following the skipped run inside the final block */
		bcnt = DIV_ROUND_UP(count, nitem_per_block);
		*offset = bytes * (count - (bcnt - 1) * nitem_per_block);

		brelse(*pbh);
		*pbh = sb_bread(sb, blocknr + bcnt);
	}
}
/*
 * collect_blocks_from_segsum - walk the finfo/binfo entries of a partial
 * segment summary and queue each data block for recovery.
 * @sbi: nilfs_sb_info
 * @sum_blocknr: disk block number of the summary's first block
 * @ssi: decoded summary information for this partial segment
 * @head: list to which nilfs_recovery_block work items are appended
 *
 * Returns 0 on success, -EIO on read failure, or -ENOMEM on allocation
 * failure.  Items already queued on @head are left for the caller to
 * dispose of on error.
 */
static int
collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr,
			   struct nilfs_segsum_info *ssi,
			   struct list_head *head)
{
	struct buffer_head *bh;
	unsigned int offset;
	unsigned long nfinfo = ssi->nfinfo;
	/* first payload block follows the summary blocks */
	sector_t blocknr = sum_blocknr + ssi->nsumblk;
	ino_t ino;
	int err = -EIO;

	if (!nfinfo)
		return 0;

	bh = sb_bread(sbi->s_super, sum_blocknr);
	if (unlikely(!bh))
		goto out;

	/* finfo entries start right after the fixed summary header */
	offset = le16_to_cpu(
		((struct nilfs_segment_summary *)bh->b_data)->ss_bytes);
	for (;;) {
		unsigned long nblocks, ndatablk, nnodeblk;
		struct nilfs_finfo *finfo;

		finfo = segsum_get(sbi->s_super, &bh, &offset, sizeof(*finfo));
		if (unlikely(!finfo))
			goto out;

		ino = le64_to_cpu(finfo->fi_ino);
		nblocks = le32_to_cpu(finfo->fi_nblocks);
		ndatablk = le32_to_cpu(finfo->fi_ndatablk);
		nnodeblk = nblocks - ndatablk;

		while (ndatablk-- > 0) {
			struct nilfs_recovery_block *rb;
			struct nilfs_binfo_v *binfo;

			binfo = segsum_get(sbi->s_super, &bh, &offset,
					   sizeof(*binfo));
			if (unlikely(!binfo))
				goto out;

			rb = kmalloc(sizeof(*rb), GFP_NOFS);
			if (unlikely(!rb)) {
				err = -ENOMEM;
				goto out;
			}
			rb->ino = ino;
			rb->blocknr = blocknr++;
			rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
			rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
			/* list_add_tail() initializes rb->list's linkage;
			   no separate INIT_LIST_HEAD() is needed */
			list_add_tail(&rb->list, head);
		}
		if (--nfinfo == 0)
			break;
		blocknr += nnodeblk; /* always 0 for the data sync segments */
		segsum_skip(sbi->s_super, &bh, &offset, sizeof(__le64),
			    nnodeblk);
		if (unlikely(!bh))
			goto out;
	}
	err = 0;
 out:
	brelse(bh); /* brelse(NULL) is just ignored */
	return err;
}
  331. static void dispose_recovery_list(struct list_head *head)
  332. {
  333. while (!list_empty(head)) {
  334. struct nilfs_recovery_block *rb
  335. = list_entry(head->next,
  336. struct nilfs_recovery_block, list);
  337. list_del(&rb->list);
  338. kfree(rb);
  339. }
  340. }
/* list entry recording a full segment number during recovery scans */
struct nilfs_segment_entry {
	struct list_head list;
	__u64 segnum;		/* full segment number */
};
  345. static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
  346. {
  347. struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);
  348. if (unlikely(!ent))
  349. return -ENOMEM;
  350. ent->segnum = segnum;
  351. INIT_LIST_HEAD(&ent->list);
  352. list_add_tail(&ent->list, head);
  353. return 0;
  354. }
  355. void nilfs_dispose_segment_list(struct list_head *head)
  356. {
  357. while (!list_empty(head)) {
  358. struct nilfs_segment_entry *ent
  359. = list_entry(head->next,
  360. struct nilfs_segment_entry, list);
  361. list_del(&ent->list);
  362. kfree(ent);
  363. }
  364. }
/*
 * nilfs_prepare_segment_for_recovery - set up segment usage state for
 * the recovery write that follows a successful roll-forward.
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: recovery information gathered by the preceding scan
 *
 * Frees the stale "next" segment, scraps the segments written after the
 * latest super root so they are not reallocated, and allocates a fresh
 * segment for the upcoming recovery checkpoint.  Returns 0 on success
 * or a negative errno.
 */
static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
					      struct nilfs_sb_info *sbi,
					      struct nilfs_recovery_info *ri)
{
	struct list_head *head = &ri->ri_used_segments;
	struct nilfs_segment_entry *ent, *n;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 segnum[4];
	int err;
	int i;

	segnum[0] = nilfs->ns_segnum;
	segnum[1] = nilfs->ns_nextnum;
	segnum[2] = ri->ri_segnum;
	segnum[3] = ri->ri_nextnum;

	nilfs_attach_writer(nilfs, sbi);
	/*
	 * Releasing the next segment of the latest super root.
	 * The next segment is invalidated by this recovery.
	 */
	err = nilfs_sufile_free(sufile, segnum[1]);
	if (unlikely(err))
		goto failed;

	/* segnum[0] (the current segment) is intentionally not queued */
	for (i = 1; i < 4; i++) {
		err = nilfs_segment_list_add(head, segnum[i]);
		if (unlikely(err))
			goto failed;
	}

	/*
	 * Collecting segments written after the latest super root.
	 * These are marked dirty to avoid being reallocated in the next write.
	 */
	list_for_each_entry_safe(ent, n, head, list) {
		if (ent->segnum != segnum[0]) {
			err = nilfs_sufile_scrap(sufile, ent->segnum);
			if (unlikely(err))
				goto failed;
		}
		list_del(&ent->list);
		kfree(ent);
	}

	/* Allocate new segments for recovery */
	err = nilfs_sufile_alloc(sufile, &segnum[0]);
	if (unlikely(err))
		goto failed;

	nilfs->ns_pseg_offset = 0;
	/* +2 skips the sequence numbers the invalidated logs would use */
	nilfs->ns_seg_seq = ri->ri_seq + 2;
	nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

 failed:
	/* No need to recover sufile because it will be destroyed on error */
	nilfs_detach_writer(nilfs, sbi);
	return err;
}
/*
 * nilfs_recovery_copy_block - copy one salvaged disk block into @page
 * @sbi: nilfs_sb_info
 * @rb: recovery work item identifying the source disk block
 * @page: destination page in the owning inode's page cache
 *
 * Reads the block synchronously and copies it to the matching offset
 * inside @page.  Returns 0 on success or -EIO on read failure.
 */
static int nilfs_recovery_copy_block(struct nilfs_sb_info *sbi,
				     struct nilfs_recovery_block *rb,
				     struct page *page)
{
	struct buffer_head *bh_org;
	void *kaddr;

	bh_org = sb_bread(sbi->s_super, rb->blocknr);
	if (unlikely(!bh_org))
		return -EIO;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(bh_org);
	return 0;
}
/*
 * recover_dsync_blocks - write the collected data blocks back into the
 * page cache of their owning inodes and mark them dirty.
 * @sbi: nilfs_sb_info
 * @head: list of nilfs_recovery_block work items (consumed)
 * @nr_salvaged_blocks: incremented once per successfully recovered block
 *
 * Every work item is removed and freed whether or not its block could
 * be recovered.  Recovery is best-effort: a failure on one block is
 * logged and remembered, and the remaining blocks are still attempted.
 * Returns 0 or the first error encountered.
 */
static int recover_dsync_blocks(struct nilfs_sb_info *sbi,
				struct list_head *head,
				unsigned long *nr_salvaged_blocks)
{
	struct inode *inode;
	struct nilfs_recovery_block *rb, *n;
	unsigned blocksize = sbi->s_super->s_blocksize;
	struct page *page;
	loff_t pos;
	int err = 0, err2 = 0;

	list_for_each_entry_safe(rb, n, head, list) {
		inode = nilfs_iget(sbi->s_super, rb->ino);
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			inode = NULL;
			goto failed_inode;
		}

		pos = rb->blkoff << inode->i_blkbits;
		page = NULL;
		err = block_write_begin(NULL, inode->i_mapping, pos, blocksize,
					0, &page, NULL, nilfs_get_block);
		if (unlikely(err))
			goto failed_inode;

		err = nilfs_recovery_copy_block(sbi, rb, page);
		if (unlikely(err))
			goto failed_page;

		err = nilfs_set_file_dirty(sbi, inode, 1);
		if (unlikely(err))
			goto failed_page;

		block_write_end(NULL, inode->i_mapping, pos, blocksize,
				blocksize, page, NULL);

		unlock_page(page);
		page_cache_release(page);

		(*nr_salvaged_blocks)++;
		goto next;

 failed_page:
		unlock_page(page);
		page_cache_release(page);

 failed_inode:
		printk(KERN_WARNING
		       "NILFS warning: error recovering data block "
		       "(err=%d, ino=%lu, block-offset=%llu)\n",
		       err, (unsigned long)rb->ino,
		       (unsigned long long)rb->blkoff);
		if (!err2)
			err2 = err; /* remember the first error only */
 next:
		iput(inode); /* iput(NULL) is just ignored */
		list_del_init(&rb->list);
		kfree(rb);
	}
	return err2;
}
/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info
 *
 * Walks the partial segments from ri->ri_lsegs_start up to
 * ri->ri_lsegs_end with a two-state machine: data blocks of each
 * data-sync logical segment are collected, and when the end of the
 * logical segment is reached they are written back via
 * recover_dsync_blocks().  Returns 0 on success or a negative errno.
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
				 struct nilfs_sb_info *sbi,
				 struct nilfs_recovery_info *ri)
{
	struct nilfs_segsum_info ssi;
	sector_t pseg_start;
	sector_t seg_start, seg_end; /* Starting/ending DBN of full segment */
	unsigned long nsalvaged_blocks = 0;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	int empty_seg = 0;
	int err = 0, ret;
	LIST_HEAD(dsync_blocks); /* list of data blocks to be recovered */
	enum {
		RF_INIT_ST,
		RF_DSYNC_ST, /* scanning data-sync segments */
	};
	int state = RF_INIT_ST;

	nilfs_attach_writer(nilfs, sbi);
	pseg_start = ri->ri_lsegs_start;
	seg_seq = ri->ri_lsegs_start_seq;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	/* loop until the scan catches up with the position recorded by
	   the preceding super-root search */
	while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {

		ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO) {
				err = -EIO;
				goto failed;
			}
			goto strayed;
		}
		/* the region being replayed lies after the latest super
		   root, so a super root here is inconsistent */
		if (unlikely(NILFS_SEG_HAS_SR(&ssi)))
			goto confused;

		/* Found a valid partial segment; do recovery actions */
		nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
		empty_seg = 0;
		nilfs->ns_ctime = ssi.ctime;
		if (!(ssi.flags & NILFS_SS_GC))
			nilfs->ns_nongc_ctime = ssi.ctime;

		switch (state) {
		case RF_INIT_ST:
			if (!NILFS_SEG_LOGBGN(&ssi) || !NILFS_SEG_DSYNC(&ssi))
				goto try_next_pseg;
			state = RF_DSYNC_ST;
			/* Fall through */
		case RF_DSYNC_ST:
			if (!NILFS_SEG_DSYNC(&ssi))
				goto confused;

			err = collect_blocks_from_segsum(
				sbi, pseg_start, &ssi, &dsync_blocks);
			if (unlikely(err))
				goto failed;
			if (NILFS_SEG_LOGEND(&ssi)) {
				err = recover_dsync_blocks(
					sbi, &dsync_blocks,
					&nsalvaged_blocks);
				if (unlikely(err))
					goto failed;
				state = RF_INIT_ST;
			}
			break; /* Fall through to try_next_pseg */
		}

 try_next_pseg:
		if (pseg_start == ri->ri_lsegs_end)
			break;
		pseg_start += ssi.nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		if (pseg_start == ri->ri_lsegs_end)
			break;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			break;
		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

	if (nsalvaged_blocks) {
		printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
		       sbi->s_super->s_id, nsalvaged_blocks);
		ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
	}
 out:
	dispose_recovery_list(&dsync_blocks);
	nilfs_detach_writer(sbi->s_nilfs, sbi);
	return err;

 confused:
	err = -EINVAL;
 failed:
	printk(KERN_ERR
	       "NILFS (device %s): Error roll-forwarding "
	       "(err=%d, pseg block=%llu). ",
	       sbi->s_super->s_id, err, (unsigned long long)pseg_start);
	goto out;
}
  591. static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
  592. struct nilfs_sb_info *sbi,
  593. struct nilfs_recovery_info *ri)
  594. {
  595. struct buffer_head *bh;
  596. int err;
  597. if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
  598. nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
  599. return;
  600. bh = sb_getblk(sbi->s_super, ri->ri_lsegs_start);
  601. BUG_ON(!bh);
  602. memset(bh->b_data, 0, bh->b_size);
  603. set_buffer_dirty(bh);
  604. err = sync_dirty_buffer(bh);
  605. if (unlikely(err))
  606. printk(KERN_WARNING
  607. "NILFS warning: buffer sync write failed during "
  608. "post-cleaning of recovery.\n");
  609. brelse(bh);
  610. }
/**
 * nilfs_recover_logical_segments - salvage logical segments written after
 * the latest super root
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error code is returned.
 *
 * %-EINVAL - Inconsistent filesystem state.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
				   struct nilfs_sb_info *sbi,
				   struct nilfs_recovery_info *ri)
{
	int err;

	/* nothing to roll forward if no logical segment range was found */
	if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
		return 0;

	err = nilfs_attach_checkpoint(sbi, ri->ri_cno);
	if (unlikely(err)) {
		printk(KERN_ERR
		       "NILFS: error loading the latest checkpoint.\n");
		return err;
	}

	err = nilfs_do_roll_forward(nilfs, sbi, ri);
	if (unlikely(err))
		goto failed;

	if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
		/* persist the salvaged blocks with a fresh checkpoint */
		err = nilfs_prepare_segment_for_recovery(nilfs, sbi, ri);
		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Error preparing segments for "
			       "recovery.\n");
			goto failed;
		}

		err = nilfs_attach_segment_constructor(sbi);
		if (unlikely(err))
			goto failed;

		set_nilfs_discontinued(nilfs);
		err = nilfs_construct_segment(sbi->s_super);
		nilfs_detach_segment_constructor(sbi);

		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Oops! recovery failed. "
			       "(err=%d)\n", err);
			goto failed;
		}

		nilfs_finish_roll_forward(nilfs, sbi, ri);
	}

 failed:
	nilfs_detach_checkpoint(sbi);
	return err;
}
/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: the_nilfs
 * @sbi: nilfs_sb_info
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * nilfs_search_super_root() looks for the latest super-root from a partial
 * segment pointed by the superblock. It sets up struct the_nilfs through
 * this search. It fills nilfs_recovery_info (ri) required for recovery.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error code is returned.
 *
 * %-EINVAL - No valid segment found
 *
 * %-EIO - I/O error
 */
int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
			    struct nilfs_recovery_info *ri)
{
	struct nilfs_segsum_info ssi;
	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
	sector_t seg_start, seg_end; /* range of full segment (block number) */
	sector_t b, end;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	__u64 cno;
	LIST_HEAD(segments);
	int empty_seg = 0, scan_newer = 0;
	int ret;

	/* start from the last position recorded in the superblock */
	pseg_start = nilfs->ns_last_pseg;
	seg_seq = nilfs->ns_last_seq;
	cno = nilfs->ns_last_cno;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

	/* Calculate range of segment */
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	/* Read ahead segment */
	b = seg_start;
	while (b <= seg_end)
		sb_breadahead(sbi->s_super, b++);

	for (;;) {
		/* Load segment summary */
		ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO)
				goto failed;
			goto strayed;
		}
		pseg_end = pseg_start + ssi.nblocks - 1;
		if (unlikely(pseg_end > seg_end)) {
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto strayed;
		}

		/* A valid partial segment */
		ri->ri_pseg_start = pseg_start;
		ri->ri_seq = seg_seq;
		ri->ri_segnum = segnum;
		nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
		ri->ri_nextnum = nextnum;
		empty_seg = 0;

		if (!NILFS_SEG_HAS_SR(&ssi) && !scan_newer) {
			/* This will never happen because a superblock
			   (last_segment) always points to a pseg
			   having a super root. */
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto failed;
		}

		/* read ahead the next full segment when entering a new one */
		if (pseg_start == seg_start) {
			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
			while (b <= end)
				sb_breadahead(sbi->s_super, b++);
		}

		if (!NILFS_SEG_HAS_SR(&ssi)) {
			/* remember the logical-segment boundaries for the
			   roll-forward pass */
			if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) {
				ri->ri_lsegs_start = pseg_start;
				ri->ri_lsegs_start_seq = seg_seq;
			}
			if (NILFS_SEG_LOGEND(&ssi))
				ri->ri_lsegs_end = pseg_start;
			goto try_next_pseg;
		}

		/* A valid super root was found. */
		ri->ri_cno = cno++;
		ri->ri_super_root = pseg_end;
		ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

		nilfs_dispose_segment_list(&segments);
		nilfs->ns_pseg_offset = (sr_pseg_start = pseg_start)
			+ ssi.nblocks - seg_start;
		nilfs->ns_seg_seq = seg_seq;
		nilfs->ns_segnum = segnum;
		nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
		nilfs->ns_ctime = ssi.ctime;
		nilfs->ns_nextnum = nextnum;

		if (scan_newer)
			ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
		else {
			/* an unclean unmount forces a scan for newer logs */
			if (nilfs->ns_mount_state & NILFS_VALID_FS)
				goto super_root_found;
			scan_newer = 1;
		}

		/* reset region for roll-forward */
		pseg_start += ssi.nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 try_next_pseg:
		/* Standing on a course, or met an inconsistent state */
		pseg_start += ssi.nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		/* Off the trail */
		if (!scan_newer)
			/*
			 * This can happen if a checkpoint was written without
			 * barriers, or as a result of an I/O failure.
			 */
			goto failed;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			goto super_root_found; /* found a valid super root */

		ret = nilfs_segment_list_add(&segments, segnum);
		if (unlikely(ret))
			goto failed;

		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

 super_root_found:
	/* Updating pointers relating to the latest checkpoint */
	list_splice_tail(&segments, &ri->ri_used_segments);
	nilfs->ns_last_pseg = sr_pseg_start;
	nilfs->ns_last_seq = nilfs->ns_seg_seq;
	nilfs->ns_last_cno = ri->ri_cno;
	return 0;

 failed:
	nilfs_dispose_segment_list(&segments);
	return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
}