scan.c

/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: scan.c,v 1.125 2005/09/30 13:59:13 dedekind Exp $
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include "nodelist.h"
#include "summary.h"
#include "debug.h"

#define DEFAULT_EMPTY_SCAN_SIZE 1024
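
/* Rate-limited warning helper: prints at most '*noise' messages while
   scanning one eraseblock, then announces that further such events for
   this block will be suppressed. */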
#define noisy_printk(noise, args...) do { \
	if (*(noise)) { \
		printk(KERN_NOTICE args); \
		(*(noise))--; \
		if (!(*(noise))) { \
			printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
		} \
	} \
} while(0)

static uint32_t pseudo_random;

static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s);

/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
 * Returning an error will abort the mount - bad checksums etc. should just mark the space
 * as dirty.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);
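
/* Smallest amount of free space worth writing into: room for two raw inode
   headers, or a full write-buffer page on flash where nodes cannot simply be
   marked obsolete in place. */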
static inline int min_free(struct jffs2_sb_info *c)
{
	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
		return c->wbuf_pagesize;
#endif
	return min;
}

static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
	if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
		return sector_size;
	else
		return DEFAULT_EMPTY_SCAN_SIZE;
}
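
/* Give up on a partially-used block: convert its remaining free space (and any
   wasted space) into dirty space and queue it on the dirty or very_dirty list. */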
static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	int ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size);

	if (ret)
		return ret;
	/* Turned wasted size into dirty, since we apparently
	   think it's recoverable now. */
	jeb->dirty_size += jeb->wasted_size;
	c->dirty_size += jeb->wasted_size;
	c->wasted_size -= jeb->wasted_size;
	jeb->wasted_size = 0;
	if (VERYDIRTY(c, jeb->dirty_size)) {
		list_add(&jeb->list, &c->very_dirty_list);
	} else {
		list_add(&jeb->list, &c->dirty_list);
	}
	return 0;
}
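
/* Scan the entire medium at mount time: classify every eraseblock, build the
   free/clean/dirty/erase-pending lists and pick an initial 'nextblock' to
   write into.  Returns 0 on success or a negative error code. */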
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
	struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
#ifndef __ECOS
	size_t pointlen;

	if (c->mtd->point) {
		ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
		if (!ret && pointlen < c->mtd->size) {
			/* Don't muck about if it won't let us point to the whole flash */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
			c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
			flashbuf = NULL;
		}
		if (ret)
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
	}
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			buf_size = c->sector_size;
		else
			buf_size = PAGE_SIZE;

		/* Respect kmalloc limitations */
		if (buf_size > 128*1024)
			buf_size = 128*1024;

		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
		flashbuf = kmalloc(buf_size, GFP_KERNEL);
		if (!flashbuf)
			return -ENOMEM;
	}

	if (jffs2_sum_active()) {
		s = kmalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
		if (!s) {
			JFFS2_WARNING("Can't allocate memory for summary\n");
			/* bail out through the common path so the read buffer
			   (or MTD point) set up above is released */
			ret = -ENOMEM;
			goto out;
		}
		memset(s, 0, sizeof(struct jffs2_summary));
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		/* reset summary info for next eraseblock scan */
		jffs2_sum_reset_collected(s);

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
					    buf_size, s);

		if (ret < 0)
			goto out;

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block.  Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again.  It will be marked as such when the erase
			 * is complete.  Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with most free space
			   and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					ret = file_dirty(c, c->nextblock);
					if (ret)
						return ret;
					/* deleting summary information of the old nextblock */
					jffs2_sum_reset_collected(c->summary);
				}
				/* update collected summary information for the current nextblock */
				jffs2_sum_move_collected(c, s);
				D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
				c->nextblock = jeb;
			} else {
				ret = file_dirty(c, jeb);
				if (ret)
					return ret;
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;

		default:
			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;

		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  skip));
		jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
		jffs2_scan_dirty_space(c, c->nextblock, skip);
	}
#endif
	if (c->nr_erasing_blocks) {
		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		jffs2_erase_pending_trigger(c);
	}
	ret = 0;
 out:
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
	if (s)
		kfree(s);
	return ret;
}
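
/* Read 'len' bytes at flash offset 'ofs' into 'buf'.  A short read is treated
   as an I/O error, so callers can rely on the whole buffer being filled. */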
int jffs2_fill_scan_buf (struct jffs2_sb_info *c, void *buf,
			 uint32_t ofs, uint32_t len)
{
	int ret;
	size_t retlen;

	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
	if (ret) {
		D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
		return ret;
	}
	if (retlen < len) {
		D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
		return -EIO;
	}
	return 0;
}
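
/* Translate the accounting gathered while scanning a block into one of the
   BLK_STATE_* classifications used by jffs2_scan_medium() above. */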
int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
	    && (!jeb->first_node || !ref_next(jeb->first_node)) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with max 4 byte dirty space to cleanlist */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}

#ifdef CONFIG_JFFS2_FS_XATTR
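/* Record an xattr datum node found during the scan: check the node CRC and
   total length, set up (or reject a duplicate of) the xattr_datum, and link a
   raw node ref for it. */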
static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_xattr *rx, uint32_t ofs,
				 struct jffs2_summary *s)
{
	struct jffs2_xattr_datum *xd;
	uint32_t totlen, crc;
	int err;

	crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
	if (crc != je32_to_cpu(rx->node_crc)) {
		if (je32_to_cpu(rx->node_crc) != 0xffffffff)
			JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
				      ofs, je32_to_cpu(rx->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	totlen = PAD(sizeof(*rx) + rx->name_len + 1 + je16_to_cpu(rx->value_len));
	if (totlen != je32_to_cpu(rx->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
			      ofs, je32_to_cpu(rx->totlen), totlen);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	xd = jffs2_setup_xattr_datum(c, je32_to_cpu(rx->xid), je32_to_cpu(rx->version));
	if (IS_ERR(xd)) {
		if (PTR_ERR(xd) == -EEXIST) {
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rx->totlen)))))
				return err;
			return 0;
		}
		return PTR_ERR(xd);
	}
	xd->xprefix = rx->xprefix;
	xd->name_len = rx->name_len;
	xd->value_len = je16_to_cpu(rx->value_len);
	xd->data_crc = je32_to_cpu(rx->data_crc);

	xd->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
	/* FIXME */ xd->node->next_in_ino = (void *)xd;

	if (jffs2_sum_active())
		jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
	dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
		  ofs, xd->xid, xd->version);
	return 0;
}

static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				struct jffs2_raw_xref *rr, uint32_t ofs,
				struct jffs2_summary *s)
{
	struct jffs2_xattr_ref *ref;
	uint32_t crc;
	int err;

	crc = crc32(0, rr, sizeof(*rr) - 4);
	if (crc != je32_to_cpu(rr->node_crc)) {
		if (je32_to_cpu(rr->node_crc) != 0xffffffff)
			JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
				      ofs, je32_to_cpu(rr->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
			return err;
		return 0;
	}

	if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
			      ofs, je32_to_cpu(rr->totlen),
			      PAD(sizeof(struct jffs2_raw_xref)));
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
			return err;
		return 0;
	}

	ref = jffs2_alloc_xattr_ref();
	if (!ref)
		return -ENOMEM;

	/* Before jffs2_build_xattr_subsystem() is called,
	 * ref->xid is used to store the 32-bit xid (xd is not used) and
	 * ref->ino is used to store the 32-bit inode number (ic is not used).
	 * Those fields are declared as a union, so the two uses are
	 * mutually exclusive. In a similar way, ref->next is temporarily
	 * used to chain all xattr_ref objects; it is re-chained onto the
	 * jffs2_inode_cache in jffs2_build_xattr_subsystem().
	 */
	ref->ino = je32_to_cpu(rr->ino);
	ref->xid = je32_to_cpu(rr->xid);
	ref->next = c->xref_temp;
	c->xref_temp = ref;

	ref->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), NULL);
	/* FIXME */ ref->node->next_in_ino = (void *)ref;

	if (jffs2_sum_active())
		jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
	dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n",
		  ofs, ref->xid, ref->ino);
	return 0;
}
#endif
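
/* Scan one eraseblock.  If a valid summary node is found at the end of the
   block it is used instead of walking the block; otherwise every node header
   is examined in turn.  Returns a BLK_STATE_* classification, or a negative
   error code which aborts the mount. */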
/* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
   the flash, XIP-style */
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (jffs2_cleanmarker_oob(c)) {
		int ret = jffs2_check_nand_cleanmarker(c, jeb);
		D2(printk(KERN_NOTICE "jffs2_check_nand_cleanmarker returned %d\n",ret));
		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1:		break;
		case 2:		return BLK_STATE_BADBLOCK;
		case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
		default:	return ret;
		}
	}
#endif

	if (jffs2_sum_active()) {
		struct jffs2_sum_marker *sm;
		void *sumptr = NULL;
		uint32_t sumlen;

		if (!buf_size) {
			/* XIP case. Just look, point at the summary if it's there */
			/* 'buf' already points at the start of this block in the
			   XIP case, so the marker sits sector_size bytes in, not
			   at jeb->offset */
			sm = (void *)buf + c->sector_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumptr = buf + je32_to_cpu(sm->offset);
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
			}
		} else {
			/* If NAND flash, read a whole page of it. Else just the end */
			if (c->wbuf_pagesize)
				buf_len = c->wbuf_pagesize;
			else
				buf_len = sizeof(*sm);

			/* Read as much as we want into the _end_ of the preallocated buffer */
			err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
						  jeb->offset + c->sector_size - buf_len,
						  buf_len);
			if (err)
				return err;

			sm = (void *)buf + buf_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
				sumptr = buf + buf_size - sumlen;

				/* Now, make sure the summary itself is available */
				if (sumlen > buf_size) {
					/* Need to kmalloc for this. */
					sumptr = kmalloc(sumlen, GFP_KERNEL);
					if (!sumptr)
						return -ENOMEM;
					memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
				}
				if (buf_len < sumlen) {
					/* Need to read more so that the entire summary node is present */
					err = jffs2_fill_scan_buf(c, sumptr,
								  jeb->offset + c->sector_size - sumlen,
								  sumlen - buf_len);
					if (err) {
						/* don't leak the buffer we may have just kmalloc()ed */
						if (sumlen > buf_size)
							kfree(sumptr);
						return err;
					}
				}
			}
		}

		if (sumptr) {
			err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);

			if (buf_size && sumlen > buf_size)
				kfree(sumptr);
			/* If it returns with a real error, bail.
			   If it returns positive, that's a block classification
			   (i.e. BLK_STATE_xxx) so return that too.
			   If it returns zero, fall through to full scan. */
			if (err)
				return err;
		}
	}

	buf_ofs = jeb->offset;

	if (!buf_size) {
		/* This is the XIP case -- we're reading _directly_ from the flash chip */
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE(c->sector_size);
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;
	/* Scan only the first EMPTY_SCAN_SIZE bytes of 0xFF before declaring the block empty */
	while(ofs < EMPTY_SCAN_SIZE(c->sector_size) && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == EMPTY_SCAN_SIZE(c->sector_size)) {
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1:		return BLK_STATE_ALLDIRTY;
			default:	return ret;
			}
		}
#endif
		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
		if (c->cleanmarker_size == 0)
			return BLK_STATE_CLEANMARKER;	/* don't bother with re-erase */
		else
			return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs));
		if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
			return err;
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

	dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);

scan_more:
	while(ofs < jeb->offset + c->sector_size) {

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Make sure there are node refs available for use */
		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
		if (err)
			return err;

		cond_resched();

		if (ofs & 3) {
			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
			if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
				return err;
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs;
			uint32_t empty_start;

			empty_start = ofs;
			ofs += 4;

			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < buf_len) {
				if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
					       empty_start, ofs);
					if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
						return err;
					goto scan_more;
				}

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
				D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
				return BLK_STATE_CLEANMARKER;
			}

			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start));
				break;
			}
			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) >
		    jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
			       ofs, je32_to_cpu(node->totlen));
			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)"
					  " left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_XREF:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)"
					  " left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
#endif	/* CONFIG_JFFS2_FS_XATTR */

		case JFFS2_NODETYPE_CLEANMARKER:
			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);

				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			if (jffs2_sum_active())
				jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY: {
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));

				jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);

				/* We can't summarise nodes we don't grok */
				jffs2_sum_disable_collecting(s);
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
			}
			}
		}
	}

	if (jffs2_sum_active()) {
		if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
			dbg_summary("There is not enough space for "
				    "summary information, disabling for this jeb!\n");
			jffs2_sum_disable_collecting(s);
		}
	}

	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
		  jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	return jffs2_scan_classify_jeb(c, jeb);
}
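
/* Return the inode cache for 'ino', allocating and registering a new one if
   this is the first node seen for that inode during the scan. */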
struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inode_cache *ic;

	ic = jffs2_get_ino_cache(c, ino);
	if (ic)
		return ic;

	if (ino > c->highest_ino)
		c->highest_ino = ino;

	ic = jffs2_alloc_inode_cache();
	if (!ic) {
		printk(KERN_NOTICE "jffs2_scan_make_ino_cache(): allocation of inode cache failed\n");
		return NULL;
	}
	memset(ic, 0, sizeof(*ic));

	ic->ino = ino;
	ic->nodes = (void *)ic;
	jffs2_add_ino_cache(c, ic);
	if (ino == 1)
		ic->nlink = 1;
	return ic;
}
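
/* Record an inode data node found during the scan.  The node is linked as
   REF_UNCHECKED; its data CRC is only verified later, as the comment inside
   explains. */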
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
{
	struct jffs2_inode_cache *ic;
	uint32_t ino = je32_to_cpu(ri->ino);
	int err;

	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));

	/* We do very little here now. Just check the ino# to which we should attribute
	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
	   we used to scan the flash once only, reading everything we want from it into
	   memory, then building all our in-core data structures and freeing the extra
	   information. Now we allow the first part of the mount to complete a lot quicker,
	   but we have to go _back_ to the flash in order to finish the CRC checking, etc.
	   Which means that the _full_ amount of time to get to proper write mode with GC
	   operational may actually be _longer_ than before. Sucks to be me. */

	ic = jffs2_get_ino_cache(c, ino);
	if (!ic) {
		/* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
		   first node we found for this inode. Do a CRC check to protect against the former
		   case */
		uint32_t crc = crc32(0, ri, sizeof(*ri)-8);

		if (crc != je32_to_cpu(ri->node_crc)) {
			printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ofs, je32_to_cpu(ri->node_crc), crc);
			/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(ri->totlen)))))
				return err;
			return 0;
		}
		ic = jffs2_scan_make_ino_cache(c, ino);
		if (!ic)
			return -ENOMEM;
	}

	/* Wheee. It worked */
	jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);

	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
		  je32_to_cpu(ri->offset),
		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));

	pseudo_random += je32_to_cpu(ri->version);

	if (jffs2_sum_active()) {
		jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
	}

	return 0;
}
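
/* Record a directory-entry node: verify the node and name CRCs, build a
   jffs2_full_dirent and attach it to the parent inode's list of entries
   collected during the scan. */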
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
{
	struct jffs2_full_dirent *fd;
	struct jffs2_inode_cache *ic;
	uint32_t crc;
	int err;

	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));

	/* We don't get here unless the node is still valid, so we don't have to
	   mask in the ACCURATE bit any more. */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->node_crc), crc);
		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
			return err;
		return 0;
	}

	pseudo_random += je32_to_cpu(rd->version);

	fd = jffs2_alloc_full_dirent(rd->nsize+1);
	if (!fd) {
		return -ENOMEM;
	}
	memcpy(&fd->name, rd->name, rd->nsize);
	fd->name[rd->nsize] = 0;

	crc = crc32(0, fd->name, rd->nsize);
	if (crc != je32_to_cpu(rd->name_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->name_crc), crc);
		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
		jffs2_free_full_dirent(fd);
		/* FIXME: Why do we believe totlen? */
		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
			return err;
		return 0;
	}
	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
	if (!ic) {
		jffs2_free_full_dirent(fd);
		return -ENOMEM;
	}

	fd->raw = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rd->totlen)), ic);

	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->type = rd->type;
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	if (jffs2_sum_active()) {
		jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
	}

	return 0;
}
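
/* Small helpers for jffs2_rotate_lists() below: count the entries on a list
   and rotate a list head forward by a given number of entries. */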
static int count_list(struct list_head *l)
{
	uint32_t count = 0;
	struct list_head *tmp;

	list_for_each(tmp, l) {
		count++;
	}
	return count;
}

/* Note: This breaks if list_empty(head). I don't care. You
   might, if you copy this code and use it elsewhere :) */
static void rotate_list(struct list_head *head, uint32_t count)
{
	struct list_head *n = head->next;

	list_del(head);
	while(count--) {
		n = n->next;
	}
	list_add(head, n);
}
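
/* Rotate each block list by a pseudo-random amount (seeded from the node
   versions seen during the scan) so that successive mounts do not always
   start using the same blocks first, spreading wear across the medium. */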
void jffs2_rotate_lists(struct jffs2_sb_info *c)
{
	uint32_t x;
	uint32_t rotateby;

	x = count_list(&c->clean_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->clean_list), rotateby);
	}

	x = count_list(&c->very_dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->very_dirty_list), rotateby);
	}

	x = count_list(&c->dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->dirty_list), rotateby);
	}

	x = count_list(&c->erasable_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->erasable_list), rotateby);
	}

	if (c->nr_erasing_blocks) {
		rotateby = pseudo_random % c->nr_erasing_blocks;
		rotate_list((&c->erase_pending_list), rotateby);
	}

	if (c->nr_free_blocks) {
		rotateby = pseudo_random % c->nr_free_blocks;
		rotate_list((&c->free_list), rotateby);
	}
}