/* scan.c */
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: scan.c,v 1.115 2004/11/17 12:59:08 dedekind Exp $
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include "nodelist.h"

/* Only the first EMPTY_SCAN_SIZE bytes of an eraseblock need to be all 0xFF
   for the scan to declare the block empty (see jffs2_scan_eraseblock()). */
#define EMPTY_SCAN_SIZE 1024

/* Space-accounting helpers: move _x bytes of this block's free space into
   the dirty / used / unchecked pools, updating both the per-eraseblock
   ('jeb') and filesystem-wide ('c') counters.  They expand in place and
   rely on variables named 'c' and 'jeb' being in scope at the call site. */
#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->dirty_size += _x; \
		jeb->free_size -= _x ; jeb->dirty_size += _x; \
		}while(0)
#define USED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->used_size += _x; \
		jeb->free_size -= _x ; jeb->used_size += _x; \
		}while(0)
#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->unchecked_size += _x; \
		jeb->free_size -= _x ; jeb->unchecked_size += _x; \
		}while(0)

/* Rate-limited printk: 'noise' points at a per-eraseblock countdown.
   Messages print while the counter is non-zero; when it hits zero a final
   notice is emitted and further messages for the block are suppressed. */
#define noisy_printk(noise, args...) do { \
	if (*(noise)) { \
		printk(KERN_NOTICE args); \
		 (*(noise))--; \
		 if (!(*(noise))) { \
			 printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
		 } \
	} \
} while(0)
/* Entropy accumulated from node version numbers during the scan; consumed
   by jffs2_rotate_lists() to randomize wear across mounts. */
static uint32_t pseudo_random;

static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size);

/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
 * Returning an error will abort the mount - bad checksums etc. should just mark the space
 * as dirty.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs);

/* Classification returned by jffs2_scan_eraseblock() for each block. */
#define BLK_STATE_ALLFF		0	/* looks fully erased (all 0xFF) */
#define BLK_STATE_CLEAN		1	/* full of valid data */
#define BLK_STATE_PARTDIRTY	2	/* some valid data, some dirt/free */
#define BLK_STATE_CLEANMARKER	3	/* contains only a cleanmarker node */
#define BLK_STATE_ALLDIRTY	4	/* nothing valid; must be erased */
#define BLK_STATE_BADBLOCK	5	/* flagged bad (NAND) */
  60. static inline int min_free(struct jffs2_sb_info *c)
  61. {
  62. uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
  63. #if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC
  64. if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
  65. return c->wbuf_pagesize;
  66. #endif
  67. return min;
  68. }
/*
 * Scan the whole medium at mount time.  Each eraseblock is classified by
 * jffs2_scan_eraseblock() and placed on the matching list (free, clean,
 * dirty, very_dirty, erase_pending or bad); the block with the most free
 * space becomes c->nextblock.  Returns 0 on success or a negative errno,
 * which aborts the mount.
 */
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;	/* stays 0 if the flash is directly mapped via mtd->point */
#ifndef __ECOS
	size_t pointlen;

	if (c->mtd->point) {
		ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
		if (!ret && pointlen < c->mtd->size) {
			/* Don't muck about if it won't let us point to the whole flash */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
			c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
			flashbuf = NULL;
		}
		if (ret)
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
	}
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			buf_size = c->sector_size;
		else
			buf_size = PAGE_SIZE;

		/* Respect kmalloc limitations */
		if (buf_size > 128*1024)
			buf_size = 128*1024;

		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
		flashbuf = kmalloc(buf_size, GFP_KERNEL);
		if (!flashbuf)
			return -ENOMEM;
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		/* With a pointed (directly mapped) flash, hand the scanner the
		   block's own mapping; otherwise the shared read buffer. */
		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size);

		if (ret < 0)
			goto out;

		ACCT_PARANOIA_CHECK(jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block.   Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again.  It will be marked as such when the erase
			 * is complete.  Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with most free space
			   and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					/* Demote the old nextblock: its remaining free and
					   wasted space is reclassified as dirty. */
					c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->free_size -= c->nextblock->free_size;
					c->wasted_size -= c->nextblock->wasted_size;
					c->nextblock->free_size = c->nextblock->wasted_size = 0;
					if (VERYDIRTY(c, c->nextblock->dirty_size)) {
						list_add(&c->nextblock->list, &c->very_dirty_list);
					} else {
						list_add(&c->nextblock->list, &c->dirty_list);
					}
				}
				c->nextblock = jeb;
			} else {
				/* Not a nextblock candidate: fold its free and wasted
				   space into dirty and file it on a dirty list. */
				jeb->dirty_size += jeb->free_size + jeb->wasted_size;
				c->dirty_size += jeb->free_size + jeb->wasted_size;
				c->free_size -= jeb->free_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->free_size = jeb->wasted_size = 0;
				if (VERYDIRTY(c, jeb->dirty_size)) {
					list_add(&jeb->list, &c->very_dirty_list);
				} else {
					list_add(&jeb->list, &c->dirty_list);
				}
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;

		default:
			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC
	if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */
		uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1);

		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  skip));
		c->nextblock->wasted_size += skip;
		c->wasted_size += skip;

		c->nextblock->free_size -= skip;
		c->free_size -= skip;
	}
#endif
	if (c->nr_erasing_blocks) {
		/* Refuse to erase anything unless at least one valid node was
		   found somewhere: protects a flash full of non-JFFS2 data. */
		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		jffs2_erase_pending_trigger(c);
	}
	ret = 0;
 out:
	/* buf_size != 0 means we kmalloc'd the buffer; otherwise it is the
	   mtd->point mapping and must be unpointed instead of freed. */
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
	return ret;
}
  233. static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf,
  234. uint32_t ofs, uint32_t len)
  235. {
  236. int ret;
  237. size_t retlen;
  238. ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
  239. if (ret) {
  240. D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
  241. return ret;
  242. }
  243. if (retlen < len) {
  244. D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
  245. return -EIO;
  246. }
  247. D2(printk(KERN_DEBUG "Read 0x%x bytes from 0x%08x into buf\n", len, ofs));
  248. D2(printk(KERN_DEBUG "000: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
  249. buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]));
  250. return 0;
  251. }
/*
 * Scan one eraseblock, walking node to node: account every byte as free,
 * dirty, used or unchecked, build the raw-node-ref chains for inodes and
 * dirents, and return one of the BLK_STATE_* classifications (or a
 * negative errno, which aborts the mount).
 *
 * 'buf'/'buf_size' is either a bounce buffer refilled on demand via
 * jffs2_fill_scan_buf() (buf_size != 0) or, when buf_size == 0, a direct
 * mapping of the whole block obtained from mtd->point by the caller.
 */
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;
#ifdef CONFIG_JFFS2_FS_NAND
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	/* 'prevofs' detects a scan that fails to advance (would loop forever);
	   start it one before the block so the first iteration can't match. */
	prevofs = jeb->offset - 1;

	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));

#ifdef CONFIG_JFFS2_FS_NAND
	if (jffs2_cleanmarker_oob(c)) {
		int ret = jffs2_check_nand_cleanmarker(c, jeb);
		D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret));
		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1: 	break;
		case 2: 	return BLK_STATE_BADBLOCK;
		case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
		default: 	return ret;
		}
	}
#endif
	buf_ofs = jeb->offset;

	if (!buf_size) {
		/* Directly mapped flash: the 'buffer' already covers the block. */
		buf_len = c->sector_size;
	} else {
		/* Read just enough to run the empty-block check below. */
		buf_len = EMPTY_SCAN_SIZE;
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;

	/* Scan only 4KiB of 0xFF before declaring it's empty */
	while(ofs < EMPTY_SCAN_SIZE && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == EMPTY_SCAN_SIZE) {
#ifdef CONFIG_JFFS2_FS_NAND
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1: 	return BLK_STATE_ALLDIRTY;
			default: 	return ret;
			}
		}
#endif
		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
		return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		/* Leading run of 0xFF before the first data: count it dirty,
		   since writes must only go at the end of a block. */
		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs));
		DIRTY_SPACE(ofs);
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;	/* per-block budget for noisy_printk() messages */

scan_more:
	while(ofs < jeb->offset + c->sector_size) {

		D1(ACCT_PARANOIA_CHECK(jeb));

		cond_resched();

		if (ofs & 3) {
			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			/* Not even room for a node header before the block ends. */
			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
			DIRTY_SPACE((jeb->offset + c->sector_size)-ofs);
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			/* Node header straddles the buffer end: refill from 'ofs'. */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			/* A run of erased (0xFF) words inside the block. */
			uint32_t inbuf_ofs;
			uint32_t empty_start;

			empty_start = ofs;
			ofs += 4;

			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < buf_len) {
				if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
					       empty_start, ofs);
					/* Empty space followed by data: the gap is dirty. */
					DIRTY_SPACE(ofs-empty_start);
					goto scan_more;
				}

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_in_ino) {
				D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE));
				return BLK_STATE_CLEANMARKER;
			}

			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start));
				break;
			}
			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		/* The header CRC is computed with the ACCURATE bit set, so OR it
		   back in before recomputing. */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) >
		    jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
			       ofs, je32_to_cpu(node->totlen));
			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				/* The raw inode struct must be contiguous in the buffer;
				   refill so it starts at buf[0]. */
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				/* Dirents carry a name, so the whole totlen must fit. */
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_CLEANMARKER:
			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				/* Valid cleanmarker at the block start: record it as the
				   (only) node reference so far. */
				struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
				if (!marker_ref) {
					printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n");
					return -ENOMEM;
				}
				marker_ref->next_in_ino = NULL;
				marker_ref->next_phys = NULL;
				marker_ref->flash_offset = ofs | REF_NORMAL;
				marker_ref->__totlen = c->cleanmarker_size;
				jeb->first_node = jeb->last_node = marker_ref;

				USED_SPACE(PAD(c->cleanmarker_size));
				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			/* Unknown node type: behavior depends on its compat bits. */
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				USED_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
			}
		}
	}

	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset,
		  jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
		&& (!jeb->first_node || !jeb->first_node->next_in_ino) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with max 4 byte dirty space to cleanlist */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}
  567. static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
  568. {
  569. struct jffs2_inode_cache *ic;
  570. ic = jffs2_get_ino_cache(c, ino);
  571. if (ic)
  572. return ic;
  573. if (ino > c->highest_ino)
  574. c->highest_ino = ino;
  575. ic = jffs2_alloc_inode_cache();
  576. if (!ic) {
  577. printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n");
  578. return NULL;
  579. }
  580. memset(ic, 0, sizeof(*ic));
  581. ic->ino = ino;
  582. ic->nodes = (void *)ic;
  583. jffs2_add_ino_cache(c, ic);
  584. if (ino == 1)
  585. ic->nlink = 1;
  586. return ic;
  587. }
/*
 * Record an inode node found during scan: attach a raw node reference to
 * the inode's cache entry and to the block's physical node chain, and
 * account the node's space as 'unchecked' (CRC of the data is deferred).
 * Returns 0 (bad nodes are merely marked dirty) or -ENOMEM.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_inode_cache *ic;
	uint32_t ino = je32_to_cpu(ri->ino);

	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));

	/* We do very little here now. Just check the ino# to which we should attribute
	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
	   we used to scan the flash once only, reading everything we want from it into
	   memory, then building all our in-core data structures and freeing the extra
	   information. Now we allow the first part of the mount to complete a lot quicker,
	   but we have to go _back_ to the flash in order to finish the CRC checking, etc.
	   Which means that the _full_ amount of time to get to proper write mode with GC
	   operational may actually be _longer_ than before. Sucks to be me. */

	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n");
		return -ENOMEM;
	}

	ic = jffs2_get_ino_cache(c, ino);
	if (!ic) {
		/* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
		   first node we found for this inode. Do a CRC check to protect against the former
		   case */
		uint32_t crc = crc32(0, ri, sizeof(*ri)-8);

		if (crc != je32_to_cpu(ri->node_crc)) {
			printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ofs, je32_to_cpu(ri->node_crc), crc);
			/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
			DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen)));
			jffs2_free_raw_node_ref(raw);
			return 0;
		}
		ic = jffs2_scan_make_ino_cache(c, ino);
		if (!ic) {
			jffs2_free_raw_node_ref(raw);
			return -ENOMEM;
		}
	}

	/* Wheee. It worked */

	/* REF_UNCHECKED: the node's data CRC will be verified later. */
	raw->flash_offset = ofs | REF_UNCHECKED;
	raw->__totlen = PAD(je32_to_cpu(ri->totlen));
	raw->next_phys = NULL;
	/* Push onto the inode's node chain... */
	raw->next_in_ino = ic->nodes;
	ic->nodes = raw;
	/* ...and append to the block's physical chain. */
	if (!jeb->first_node)
		jeb->first_node = raw;
	if (jeb->last_node)
		jeb->last_node->next_phys = raw;
	jeb->last_node = raw;

	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
		  je32_to_cpu(ri->offset),
		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));

	pseudo_random += je32_to_cpu(ri->version);

	UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));
	return 0;
}
/*
 * Record a dirent node found during scan: verify node and name CRCs,
 * build a full_dirent carrying the name, link a raw node reference into
 * the parent inode's cache and the block's physical chain, and account
 * the node's space as used.  Bad CRCs dirty the space and return 0;
 * allocation failure returns -ENOMEM.
 */
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;
	struct jffs2_inode_cache *ic;
	uint32_t crc;

	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));

	/* We don't get here unless the node is still valid, so we don't have to
	   mask in the ACCURATE bit any more. */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->node_crc), crc);
		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
		return 0;
	}

	pseudo_random += je32_to_cpu(rd->version);

	/* +1 for the NUL terminator appended below. */
	fd = jffs2_alloc_full_dirent(rd->nsize+1);
	if (!fd) {
		return -ENOMEM;
	}
	memcpy(&fd->name, rd->name, rd->nsize);
	fd->name[rd->nsize] = 0;

	crc = crc32(0, fd->name, rd->nsize);
	if (crc != je32_to_cpu(rd->name_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->name_crc), crc);
		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
		jffs2_free_full_dirent(fd);
		/* FIXME: Why do we believe totlen? */
		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
		return 0;
	}
	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		jffs2_free_full_dirent(fd);
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n");
		return -ENOMEM;
	}
	/* The dirent belongs to the _parent_ inode's cache. */
	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
	if (!ic) {
		jffs2_free_full_dirent(fd);
		jffs2_free_raw_node_ref(raw);
		return -ENOMEM;
	}

	/* REF_PRISTINE: both node and name CRCs already checked above. */
	raw->__totlen = PAD(je32_to_cpu(rd->totlen));
	raw->flash_offset = ofs | REF_PRISTINE;
	raw->next_phys = NULL;
	raw->next_in_ino = ic->nodes;
	ic->nodes = raw;
	if (!jeb->first_node)
		jeb->first_node = raw;
	if (jeb->last_node)
		jeb->last_node->next_phys = raw;
	jeb->last_node = raw;

	fd->raw = raw;
	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->type = rd->type;
	USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	return 0;
}
  715. static int count_list(struct list_head *l)
  716. {
  717. uint32_t count = 0;
  718. struct list_head *tmp;
  719. list_for_each(tmp, l) {
  720. count++;
  721. }
  722. return count;
  723. }
  724. /* Note: This breaks if list_empty(head). I don't care. You
  725. might, if you copy this code and use it elsewhere :) */
  726. static void rotate_list(struct list_head *head, uint32_t count)
  727. {
  728. struct list_head *n = head->next;
  729. list_del(head);
  730. while(count--) {
  731. n = n->next;
  732. }
  733. list_add(head, n);
  734. }
  735. void jffs2_rotate_lists(struct jffs2_sb_info *c)
  736. {
  737. uint32_t x;
  738. uint32_t rotateby;
  739. x = count_list(&c->clean_list);
  740. if (x) {
  741. rotateby = pseudo_random % x;
  742. D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby));
  743. rotate_list((&c->clean_list), rotateby);
  744. D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n",
  745. list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset));
  746. } else {
  747. D1(printk(KERN_DEBUG "Not rotating empty clean_list\n"));
  748. }
  749. x = count_list(&c->very_dirty_list);
  750. if (x) {
  751. rotateby = pseudo_random % x;
  752. D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby));
  753. rotate_list((&c->very_dirty_list), rotateby);
  754. D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n",
  755. list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset));
  756. } else {
  757. D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n"));
  758. }
  759. x = count_list(&c->dirty_list);
  760. if (x) {
  761. rotateby = pseudo_random % x;
  762. D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby));
  763. rotate_list((&c->dirty_list), rotateby);
  764. D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n",
  765. list_entry(c->dirty_list.next, struct jffs2_eraseblock, list)->offset));
  766. } else {
  767. D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n"));
  768. }
  769. x = count_list(&c->erasable_list);
  770. if (x) {
  771. rotateby = pseudo_random % x;
  772. D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby));
  773. rotate_list((&c->erasable_list), rotateby);
  774. D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n",
  775. list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset));
  776. } else {
  777. D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n"));
  778. }
  779. if (c->nr_erasing_blocks) {
  780. rotateby = pseudo_random % c->nr_erasing_blocks;
  781. D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby));
  782. rotate_list((&c->erase_pending_list), rotateby);
  783. D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n",
  784. list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset));
  785. } else {
  786. D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n"));
  787. }
  788. if (c->nr_free_blocks) {
  789. rotateby = pseudo_random % c->nr_free_blocks;
  790. D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby));
  791. rotate_list((&c->free_list), rotateby);
  792. D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n",
  793. list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset));
  794. } else {
  795. D1(printk(KERN_DEBUG "Not rotating empty free_list\n"));
  796. }
  797. }