/* fs/jffs2/scan.c */
  1. /*
  2. * JFFS2 -- Journalling Flash File System, Version 2.
  3. *
  4. * Copyright © 2001-2007 Red Hat, Inc.
  5. *
  6. * Created by David Woodhouse <dwmw2@infradead.org>
  7. *
  8. * For licensing information, see the file 'LICENCE' in this directory.
  9. *
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/sched.h>
  13. #include <linux/slab.h>
  14. #include <linux/mtd/mtd.h>
  15. #include <linux/pagemap.h>
  16. #include <linux/crc32.h>
  17. #include <linux/compiler.h>
  18. #include "nodelist.h"
  19. #include "summary.h"
  20. #include "debug.h"
/* Number of leading bytes checked for 0xFF before a block is declared
   empty -- capped at the sector size; see EMPTY_SCAN_SIZE(). */
#define DEFAULT_EMPTY_SCAN_SIZE 1024

/* Rate-limited printk for per-eraseblock scan noise: emits KERN_NOTICE
 * messages while the caller-supplied budget *(noise) is non-zero,
 * decrementing it each time, and prints one final notice when the budget
 * runs out.  Prevents a corrupt block from flooding the log. */
#define noisy_printk(noise, args...) do { \
	if (*(noise)) { \
		printk(KERN_NOTICE args); \
		(*(noise))--; \
		if (!(*(noise))) { \
			printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
		} \
	} \
} while(0)

/* Pseudo-random value accumulated over the scan; passed by address to
   jffs2_sum_scan_sumnode().  Its ultimate consumer is not visible in this
   file chunk -- NOTE(review): confirm at the callers. */
static uint32_t pseudo_random;

static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s);

/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
 * Returning an error will abort the mount - bad checksums etc. should just mark the space
 * as dirty.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);
  42. static inline int min_free(struct jffs2_sb_info *c)
  43. {
  44. uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
  45. #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
  46. if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
  47. return c->wbuf_pagesize;
  48. #endif
  49. return min;
  50. }
  51. static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
  52. if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
  53. return sector_size;
  54. else
  55. return DEFAULT_EMPTY_SCAN_SIZE;
  56. }
  57. static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
  58. {
  59. int ret;
  60. if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
  61. return ret;
  62. if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
  63. return ret;
  64. /* Turned wasted size into dirty, since we apparently
  65. think it's recoverable now. */
  66. jeb->dirty_size += jeb->wasted_size;
  67. c->dirty_size += jeb->wasted_size;
  68. c->wasted_size -= jeb->wasted_size;
  69. jeb->wasted_size = 0;
  70. if (VERYDIRTY(c, jeb->dirty_size)) {
  71. list_add(&jeb->list, &c->very_dirty_list);
  72. } else {
  73. list_add(&jeb->list, &c->dirty_list);
  74. }
  75. return 0;
  76. }
  77. int jffs2_scan_medium(struct jffs2_sb_info *c)
  78. {
  79. int i, ret;
  80. uint32_t empty_blocks = 0, bad_blocks = 0;
  81. unsigned char *flashbuf = NULL;
  82. uint32_t buf_size = 0;
  83. struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
  84. #ifndef __ECOS
  85. size_t pointlen;
  86. if (c->mtd->point) {
  87. ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
  88. if (!ret && pointlen < c->mtd->size) {
  89. /* Don't muck about if it won't let us point to the whole flash */
  90. D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
  91. c->mtd->unpoint(c->mtd, flashbuf, 0, pointlen);
  92. flashbuf = NULL;
  93. }
  94. if (ret)
  95. D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
  96. }
  97. #endif
  98. if (!flashbuf) {
  99. /* For NAND it's quicker to read a whole eraseblock at a time,
  100. apparently */
  101. if (jffs2_cleanmarker_oob(c))
  102. buf_size = c->sector_size;
  103. else
  104. buf_size = PAGE_SIZE;
  105. /* Respect kmalloc limitations */
  106. if (buf_size > 128*1024)
  107. buf_size = 128*1024;
  108. D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
  109. flashbuf = kmalloc(buf_size, GFP_KERNEL);
  110. if (!flashbuf)
  111. return -ENOMEM;
  112. }
  113. if (jffs2_sum_active()) {
  114. s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
  115. if (!s) {
  116. kfree(flashbuf);
  117. JFFS2_WARNING("Can't allocate memory for summary\n");
  118. return -ENOMEM;
  119. }
  120. }
  121. for (i=0; i<c->nr_blocks; i++) {
  122. struct jffs2_eraseblock *jeb = &c->blocks[i];
  123. cond_resched();
  124. /* reset summary info for next eraseblock scan */
  125. jffs2_sum_reset_collected(s);
  126. ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
  127. buf_size, s);
  128. if (ret < 0)
  129. goto out;
  130. jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
  131. /* Now decide which list to put it on */
  132. switch(ret) {
  133. case BLK_STATE_ALLFF:
  134. /*
  135. * Empty block. Since we can't be sure it
  136. * was entirely erased, we just queue it for erase
  137. * again. It will be marked as such when the erase
  138. * is complete. Meanwhile we still count it as empty
  139. * for later checks.
  140. */
  141. empty_blocks++;
  142. list_add(&jeb->list, &c->erase_pending_list);
  143. c->nr_erasing_blocks++;
  144. break;
  145. case BLK_STATE_CLEANMARKER:
  146. /* Only a CLEANMARKER node is valid */
  147. if (!jeb->dirty_size) {
  148. /* It's actually free */
  149. list_add(&jeb->list, &c->free_list);
  150. c->nr_free_blocks++;
  151. } else {
  152. /* Dirt */
  153. D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
  154. list_add(&jeb->list, &c->erase_pending_list);
  155. c->nr_erasing_blocks++;
  156. }
  157. break;
  158. case BLK_STATE_CLEAN:
  159. /* Full (or almost full) of clean data. Clean list */
  160. list_add(&jeb->list, &c->clean_list);
  161. break;
  162. case BLK_STATE_PARTDIRTY:
  163. /* Some data, but not full. Dirty list. */
  164. /* We want to remember the block with most free space
  165. and stick it in the 'nextblock' position to start writing to it. */
  166. if (jeb->free_size > min_free(c) &&
  167. (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
  168. /* Better candidate for the next writes to go to */
  169. if (c->nextblock) {
  170. ret = file_dirty(c, c->nextblock);
  171. if (ret)
  172. return ret;
  173. /* deleting summary information of the old nextblock */
  174. jffs2_sum_reset_collected(c->summary);
  175. }
  176. /* update collected summary information for the current nextblock */
  177. jffs2_sum_move_collected(c, s);
  178. D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
  179. c->nextblock = jeb;
  180. } else {
  181. ret = file_dirty(c, jeb);
  182. if (ret)
  183. return ret;
  184. }
  185. break;
  186. case BLK_STATE_ALLDIRTY:
  187. /* Nothing valid - not even a clean marker. Needs erasing. */
  188. /* For now we just put it on the erasing list. We'll start the erases later */
  189. D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
  190. list_add(&jeb->list, &c->erase_pending_list);
  191. c->nr_erasing_blocks++;
  192. break;
  193. case BLK_STATE_BADBLOCK:
  194. D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
  195. list_add(&jeb->list, &c->bad_list);
  196. c->bad_size += c->sector_size;
  197. c->free_size -= c->sector_size;
  198. bad_blocks++;
  199. break;
  200. default:
  201. printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
  202. BUG();
  203. }
  204. }
  205. /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
  206. if (c->nextblock && (c->nextblock->dirty_size)) {
  207. c->nextblock->wasted_size += c->nextblock->dirty_size;
  208. c->wasted_size += c->nextblock->dirty_size;
  209. c->dirty_size -= c->nextblock->dirty_size;
  210. c->nextblock->dirty_size = 0;
  211. }
  212. #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
  213. if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
  214. /* If we're going to start writing into a block which already
  215. contains data, and the end of the data isn't page-aligned,
  216. skip a little and align it. */
  217. uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;
  218. D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
  219. skip));
  220. jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
  221. jffs2_scan_dirty_space(c, c->nextblock, skip);
  222. }
  223. #endif
  224. if (c->nr_erasing_blocks) {
  225. if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
  226. printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
  227. printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
  228. ret = -EIO;
  229. goto out;
  230. }
  231. jffs2_erase_pending_trigger(c);
  232. }
  233. ret = 0;
  234. out:
  235. if (buf_size)
  236. kfree(flashbuf);
  237. #ifndef __ECOS
  238. else
  239. c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
  240. #endif
  241. if (s)
  242. kfree(s);
  243. return ret;
  244. }
  245. static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
  246. uint32_t ofs, uint32_t len)
  247. {
  248. int ret;
  249. size_t retlen;
  250. ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
  251. if (ret) {
  252. D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
  253. return ret;
  254. }
  255. if (retlen < len) {
  256. D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
  257. return -EIO;
  258. }
  259. return 0;
  260. }
  261. int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
  262. {
  263. if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
  264. && (!jeb->first_node || !ref_next(jeb->first_node)) )
  265. return BLK_STATE_CLEANMARKER;
  266. /* move blocks with max 4 byte dirty space to cleanlist */
  267. else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
  268. c->dirty_size -= jeb->dirty_size;
  269. c->wasted_size += jeb->dirty_size;
  270. jeb->wasted_size += jeb->dirty_size;
  271. jeb->dirty_size = 0;
  272. return BLK_STATE_CLEAN;
  273. } else if (jeb->used_size || jeb->unchecked_size)
  274. return BLK_STATE_PARTDIRTY;
  275. else
  276. return BLK_STATE_ALLDIRTY;
  277. }
  278. #ifdef CONFIG_JFFS2_FS_XATTR
/*
 * Process one JFFS2_NODETYPE_XATTR node encountered during scan.
 *
 * Verifies the node CRC and the recorded total length; on either
 * mismatch the node's space is marked dirty and 0 is returned, since a
 * bad node must not abort the mount.  A valid node is registered with
 * the xattr datum cache: if the cache already holds a newer version,
 * the node is linked in as an obsolete sibling of the cached one;
 * otherwise the cached datum is updated from this node.
 *
 * Returns 0 on success (including "bad node, space dirtied"), or a
 * negative error code on fatal failure.
 */
static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_xattr *rx, uint32_t ofs,
				 struct jffs2_summary *s)
{
	struct jffs2_xattr_datum *xd;
	uint32_t xid, version, totlen, crc;
	int err;

	/* CRC covers the whole header except its trailing 4 bytes --
	   presumably the node_crc field itself; confirm against the
	   jffs2_raw_xattr layout. */
	crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
	if (crc != je32_to_cpu(rx->node_crc)) {
		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
			      ofs, je32_to_cpu(rx->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	xid = je32_to_cpu(rx->xid);
	version = je32_to_cpu(rx->version);

	/* Expected on-flash length: header + name (NUL-terminated) + value,
	   padded to the JFFS2 alignment. */
	totlen = PAD(sizeof(struct jffs2_raw_xattr)
			+ rx->name_len + 1 + je16_to_cpu(rx->value_len));
	if (totlen != je32_to_cpu(rx->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
			      ofs, je32_to_cpu(rx->totlen), totlen);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	/* Find or create the cache entry for this xattr id/version. */
	xd = jffs2_setup_xattr_datum(c, xid, version);
	if (IS_ERR(xd))
		return PTR_ERR(xd);

	if (xd->version > version) {
		/* Cache already holds a newer version: link this node in as
		   an obsolete sibling on the datum's node chain. */
		struct jffs2_raw_node_ref *raw
			= jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
		raw->next_in_ino = xd->node->next_in_ino;
		xd->node->next_in_ino = raw;
	} else {
		/* This node is the newest seen so far: refresh the cached
		   datum from it and make it the datum's primary node. */
		xd->version = version;
		xd->xprefix = rx->xprefix;
		xd->name_len = rx->name_len;
		xd->value_len = je16_to_cpu(rx->value_len);
		xd->data_crc = je32_to_cpu(rx->data_crc);

		jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd);
	}

	if (jffs2_sum_active())
		jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
	dbg_xattr("scaning xdatum at %#08x (xid=%u, version=%u)\n",
		  ofs, xd->xid, xd->version);
	return 0;
}
/*
 * Process one JFFS2_NODETYPE_XREF node (an inode <-> xattr binding)
 * encountered during scan.
 *
 * Verifies the node CRC and that the total length matches the fixed
 * xref node size; on mismatch the space is marked dirty and 0 is
 * returned (a bad node must not abort the mount).  A valid xref is
 * recorded on the temporary c->xref_temp chain, to be resolved by
 * jffs2_build_xattr_subsystem() later.
 *
 * Returns 0 on success, -ENOMEM if the ref cannot be allocated, or a
 * negative error from the dirty-space accounting.
 */
static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				struct jffs2_raw_xref *rr, uint32_t ofs,
				struct jffs2_summary *s)
{
	struct jffs2_xattr_ref *ref;
	uint32_t crc;
	int err;

	/* CRC covers the node except its trailing 4 bytes -- presumably the
	   node_crc field itself; confirm against the jffs2_raw_xref layout. */
	crc = crc32(0, rr, sizeof(*rr) - 4);
	if (crc != je32_to_cpu(rr->node_crc)) {
		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
			      ofs, je32_to_cpu(rr->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
			return err;
		return 0;
	}

	/* xref nodes have a fixed size; anything else is corruption. */
	if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n",
			      ofs, je32_to_cpu(rr->totlen),
			      PAD(sizeof(struct jffs2_raw_xref)));
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
			return err;
		return 0;
	}

	ref = jffs2_alloc_xattr_ref();
	if (!ref)
		return -ENOMEM;

	/* BEFORE jffs2_build_xattr_subsystem() called,
	 * and AFTER xattr_ref is marked as a dead xref,
	 * ref->xid is used to store 32bit xid, xd is not used
	 * ref->ino is used to store 32bit inode-number, ic is not used
	 * Those variables are declared as union, thus using those
	 * are exclusive. In a similar way, ref->next is temporarily
	 * used to chain all xattr_ref object. It's re-chained to
	 * jffs2_inode_cache in jffs2_build_xattr_subsystem() correctly.
	 */
	ref->ino = je32_to_cpu(rr->ino);
	ref->xid = je32_to_cpu(rr->xid);
	ref->xseqno = je32_to_cpu(rr->xseqno);
	/* Track the highest sequence number seen, ignoring the delete bit. */
	if (ref->xseqno > c->highest_xseqno)
		c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
	ref->next = c->xref_temp;
	c->xref_temp = ref;

	jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);

	if (jffs2_sum_active())
		jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
	dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n",
		  ofs, ref->xid, ref->ino);
	return 0;
}
  376. #endif
  377. /* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
  378. the flash, XIP-style */
  379. static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  380. unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
  381. struct jffs2_unknown_node *node;
  382. struct jffs2_unknown_node crcnode;
  383. uint32_t ofs, prevofs;
  384. uint32_t hdr_crc, buf_ofs, buf_len;
  385. int err;
  386. int noise = 0;
  387. #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
  388. int cleanmarkerfound = 0;
  389. #endif
  390. ofs = jeb->offset;
  391. prevofs = jeb->offset - 1;
  392. D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));
  393. #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
  394. if (jffs2_cleanmarker_oob(c)) {
  395. int ret;
  396. if (c->mtd->block_isbad(c->mtd, jeb->offset))
  397. return BLK_STATE_BADBLOCK;
  398. ret = jffs2_check_nand_cleanmarker(c, jeb);
  399. D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret));
  400. /* Even if it's not found, we still scan to see
  401. if the block is empty. We use this information
  402. to decide whether to erase it or not. */
  403. switch (ret) {
  404. case 0: cleanmarkerfound = 1; break;
  405. case 1: break;
  406. default: return ret;
  407. }
  408. }
  409. #endif
  410. if (jffs2_sum_active()) {
  411. struct jffs2_sum_marker *sm;
  412. void *sumptr = NULL;
  413. uint32_t sumlen;
  414. if (!buf_size) {
  415. /* XIP case. Just look, point at the summary if it's there */
  416. sm = (void *)buf + c->sector_size - sizeof(*sm);
  417. if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
  418. sumptr = buf + je32_to_cpu(sm->offset);
  419. sumlen = c->sector_size - je32_to_cpu(sm->offset);
  420. }
  421. } else {
  422. /* If NAND flash, read a whole page of it. Else just the end */
  423. if (c->wbuf_pagesize)
  424. buf_len = c->wbuf_pagesize;
  425. else
  426. buf_len = sizeof(*sm);
  427. /* Read as much as we want into the _end_ of the preallocated buffer */
  428. err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
  429. jeb->offset + c->sector_size - buf_len,
  430. buf_len);
  431. if (err)
  432. return err;
  433. sm = (void *)buf + buf_size - sizeof(*sm);
  434. if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
  435. sumlen = c->sector_size - je32_to_cpu(sm->offset);
  436. sumptr = buf + buf_size - sumlen;
  437. /* Now, make sure the summary itself is available */
  438. if (sumlen > buf_size) {
  439. /* Need to kmalloc for this. */
  440. sumptr = kmalloc(sumlen, GFP_KERNEL);
  441. if (!sumptr)
  442. return -ENOMEM;
  443. memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
  444. }
  445. if (buf_len < sumlen) {
  446. /* Need to read more so that the entire summary node is present */
  447. err = jffs2_fill_scan_buf(c, sumptr,
  448. jeb->offset + c->sector_size - sumlen,
  449. sumlen - buf_len);
  450. if (err)
  451. return err;
  452. }
  453. }
  454. }
  455. if (sumptr) {
  456. err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);
  457. if (buf_size && sumlen > buf_size)
  458. kfree(sumptr);
  459. /* If it returns with a real error, bail.
  460. If it returns positive, that's a block classification
  461. (i.e. BLK_STATE_xxx) so return that too.
  462. If it returns zero, fall through to full scan. */
  463. if (err)
  464. return err;
  465. }
  466. }
  467. buf_ofs = jeb->offset;
  468. if (!buf_size) {
  469. /* This is the XIP case -- we're reading _directly_ from the flash chip */
  470. buf_len = c->sector_size;
  471. } else {
  472. buf_len = EMPTY_SCAN_SIZE(c->sector_size);
  473. err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
  474. if (err)
  475. return err;
  476. }
  477. /* We temporarily use 'ofs' as a pointer into the buffer/jeb */
  478. ofs = 0;
  479. /* Scan only 4KiB of 0xFF before declaring it's empty */
  480. while(ofs < EMPTY_SCAN_SIZE(c->sector_size) && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
  481. ofs += 4;
  482. if (ofs == EMPTY_SCAN_SIZE(c->sector_size)) {
  483. #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
  484. if (jffs2_cleanmarker_oob(c)) {
  485. /* scan oob, take care of cleanmarker */
  486. int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
  487. D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
  488. switch (ret) {
  489. case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
  490. case 1: return BLK_STATE_ALLDIRTY;
  491. default: return ret;
  492. }
  493. }
  494. #endif
  495. D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
  496. if (c->cleanmarker_size == 0)
  497. return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */
  498. else
  499. return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */
  500. }
  501. if (ofs) {
  502. D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
  503. jeb->offset + ofs));
  504. if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
  505. return err;
  506. if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
  507. return err;
  508. }
  509. /* Now ofs is a complete physical flash offset as it always was... */
  510. ofs += jeb->offset;
  511. noise = 10;
  512. dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);
  513. scan_more:
  514. while(ofs < jeb->offset + c->sector_size) {
  515. jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
  516. /* Make sure there are node refs available for use */
  517. err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
  518. if (err)
  519. return err;
  520. cond_resched();
  521. if (ofs & 3) {
  522. printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
  523. ofs = PAD(ofs);
  524. continue;
  525. }
  526. if (ofs == prevofs) {
  527. printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
  528. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  529. return err;
  530. ofs += 4;
  531. continue;
  532. }
  533. prevofs = ofs;
  534. if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
  535. D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
  536. jeb->offset, c->sector_size, ofs, sizeof(*node)));
  537. if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
  538. return err;
  539. break;
  540. }
  541. if (buf_ofs + buf_len < ofs + sizeof(*node)) {
  542. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  543. D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
  544. sizeof(struct jffs2_unknown_node), buf_len, ofs));
  545. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  546. if (err)
  547. return err;
  548. buf_ofs = ofs;
  549. }
  550. node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];
  551. if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
  552. uint32_t inbuf_ofs;
  553. uint32_t empty_start, scan_end;
  554. empty_start = ofs;
  555. ofs += 4;
  556. scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);
  557. D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
  558. more_empty:
  559. inbuf_ofs = ofs - buf_ofs;
  560. while (inbuf_ofs < scan_end) {
  561. if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
  562. printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
  563. empty_start, ofs);
  564. if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
  565. return err;
  566. goto scan_more;
  567. }
  568. inbuf_ofs+=4;
  569. ofs += 4;
  570. }
  571. /* Ran off end. */
  572. D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));
  573. /* If we're only checking the beginning of a block with a cleanmarker,
  574. bail now */
  575. if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
  576. c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
  577. D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
  578. return BLK_STATE_CLEANMARKER;
  579. }
  580. if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
  581. scan_end = buf_len;
  582. goto more_empty;
  583. }
  584. /* See how much more there is to read in this eraseblock... */
  585. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  586. if (!buf_len) {
  587. /* No more to read. Break out of main loop without marking
  588. this range of empty space as dirty (because it's not) */
  589. D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
  590. empty_start));
  591. break;
  592. }
  593. /* point never reaches here */
  594. scan_end = buf_len;
  595. D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
  596. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  597. if (err)
  598. return err;
  599. buf_ofs = ofs;
  600. goto more_empty;
  601. }
  602. if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
  603. printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
  604. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  605. return err;
  606. ofs += 4;
  607. continue;
  608. }
  609. if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
  610. D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
  611. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  612. return err;
  613. ofs += 4;
  614. continue;
  615. }
  616. if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
  617. printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
  618. printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
  619. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  620. return err;
  621. ofs += 4;
  622. continue;
  623. }
  624. if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
  625. /* OK. We're out of possibilities. Whinge and move on */
  626. noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
  627. JFFS2_MAGIC_BITMASK, ofs,
  628. je16_to_cpu(node->magic));
  629. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  630. return err;
  631. ofs += 4;
  632. continue;
  633. }
  634. /* We seem to have a node of sorts. Check the CRC */
  635. crcnode.magic = node->magic;
  636. crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
  637. crcnode.totlen = node->totlen;
  638. hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);
  639. if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
  640. noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
  641. ofs, je16_to_cpu(node->magic),
  642. je16_to_cpu(node->nodetype),
  643. je32_to_cpu(node->totlen),
  644. je32_to_cpu(node->hdr_crc),
  645. hdr_crc);
  646. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  647. return err;
  648. ofs += 4;
  649. continue;
  650. }
  651. if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
  652. /* Eep. Node goes over the end of the erase block. */
  653. printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
  654. ofs, je32_to_cpu(node->totlen));
  655. printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
  656. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  657. return err;
  658. ofs += 4;
  659. continue;
  660. }
  661. if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
  662. /* Wheee. This is an obsoleted node */
  663. D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
  664. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
  665. return err;
  666. ofs += PAD(je32_to_cpu(node->totlen));
  667. continue;
  668. }
  669. switch(je16_to_cpu(node->nodetype)) {
  670. case JFFS2_NODETYPE_INODE:
  671. if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
  672. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  673. D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
  674. sizeof(struct jffs2_raw_inode), buf_len, ofs));
  675. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  676. if (err)
  677. return err;
  678. buf_ofs = ofs;
  679. node = (void *)buf;
  680. }
  681. err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
  682. if (err) return err;
  683. ofs += PAD(je32_to_cpu(node->totlen));
  684. break;
  685. case JFFS2_NODETYPE_DIRENT:
  686. if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
  687. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  688. D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
  689. je32_to_cpu(node->totlen), buf_len, ofs));
  690. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  691. if (err)
  692. return err;
  693. buf_ofs = ofs;
  694. node = (void *)buf;
  695. }
  696. err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
  697. if (err) return err;
  698. ofs += PAD(je32_to_cpu(node->totlen));
  699. break;
  700. #ifdef CONFIG_JFFS2_FS_XATTR
  701. case JFFS2_NODETYPE_XATTR:
  702. if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
  703. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  704. D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)"
  705. " left to end of buf. Reading 0x%x at 0x%08x\n",
  706. je32_to_cpu(node->totlen), buf_len, ofs));
  707. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  708. if (err)
  709. return err;
  710. buf_ofs = ofs;
  711. node = (void *)buf;
  712. }
  713. err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
  714. if (err)
  715. return err;
  716. ofs += PAD(je32_to_cpu(node->totlen));
  717. break;
  718. case JFFS2_NODETYPE_XREF:
  719. if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
  720. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  721. D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)"
  722. " left to end of buf. Reading 0x%x at 0x%08x\n",
  723. je32_to_cpu(node->totlen), buf_len, ofs));
  724. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  725. if (err)
  726. return err;
  727. buf_ofs = ofs;
  728. node = (void *)buf;
  729. }
  730. err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
  731. if (err)
  732. return err;
  733. ofs += PAD(je32_to_cpu(node->totlen));
  734. break;
  735. #endif /* CONFIG_JFFS2_FS_XATTR */
  736. case JFFS2_NODETYPE_CLEANMARKER:
  737. D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
  738. if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
  739. printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
  740. ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
  741. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
  742. return err;
  743. ofs += PAD(sizeof(struct jffs2_unknown_node));
  744. } else if (jeb->first_node) {
  745. printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
  746. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
  747. return err;
  748. ofs += PAD(sizeof(struct jffs2_unknown_node));
  749. } else {
  750. jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);
  751. ofs += PAD(c->cleanmarker_size);
  752. }
  753. break;
  754. case JFFS2_NODETYPE_PADDING:
  755. if (jffs2_sum_active())
  756. jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
  757. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
  758. return err;
  759. ofs += PAD(je32_to_cpu(node->totlen));
  760. break;
  761. default:
  762. switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
  763. case JFFS2_FEATURE_ROCOMPAT:
  764. printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
  765. c->flags |= JFFS2_SB_FLAG_RO;
  766. if (!(jffs2_is_readonly(c)))
  767. return -EROFS;
  768. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
  769. return err;
  770. ofs += PAD(je32_to_cpu(node->totlen));
  771. break;
  772. case JFFS2_FEATURE_INCOMPAT:
  773. printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
  774. return -EINVAL;
  775. case JFFS2_FEATURE_RWCOMPAT_DELETE:
  776. D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
  777. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
  778. return err;
  779. ofs += PAD(je32_to_cpu(node->totlen));
  780. break;
  781. case JFFS2_FEATURE_RWCOMPAT_COPY: {
  782. D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
  783. jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);
  784. /* We can't summarise nodes we don't grok */
  785. jffs2_sum_disable_collecting(s);
  786. ofs += PAD(je32_to_cpu(node->totlen));
  787. break;
  788. }
  789. }
  790. }
  791. }
  792. if (jffs2_sum_active()) {
  793. if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
  794. dbg_summary("There is not enough space for "
  795. "summary information, disabling for this jeb!\n");
  796. jffs2_sum_disable_collecting(s);
  797. }
  798. }
  799. D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
  800. jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));
  801. /* mark_node_obsolete can add to wasted !! */
  802. if (jeb->wasted_size) {
  803. jeb->dirty_size += jeb->wasted_size;
  804. c->dirty_size += jeb->wasted_size;
  805. c->wasted_size -= jeb->wasted_size;
  806. jeb->wasted_size = 0;
  807. }
  808. return jffs2_scan_classify_jeb(c, jeb);
  809. }
  810. struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
  811. {
  812. struct jffs2_inode_cache *ic;
  813. ic = jffs2_get_ino_cache(c, ino);
  814. if (ic)
  815. return ic;
  816. if (ino > c->highest_ino)
  817. c->highest_ino = ino;
  818. ic = jffs2_alloc_inode_cache();
  819. if (!ic) {
  820. printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n");
  821. return NULL;
  822. }
  823. memset(ic, 0, sizeof(*ic));
  824. ic->ino = ino;
  825. ic->nodes = (void *)ic;
  826. jffs2_add_ino_cache(c, ic);
  827. if (ino == 1)
  828. ic->nlink = 1;
  829. return ic;
  830. }
  831. static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  832. struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
  833. {
  834. struct jffs2_inode_cache *ic;
  835. uint32_t crc, ino = je32_to_cpu(ri->ino);
  836. D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));
  837. /* We do very little here now. Just check the ino# to which we should attribute
  838. this node; we can do all the CRC checking etc. later. There's a tradeoff here --
  839. we used to scan the flash once only, reading everything we want from it into
  840. memory, then building all our in-core data structures and freeing the extra
  841. information. Now we allow the first part of the mount to complete a lot quicker,
  842. but we have to go _back_ to the flash in order to finish the CRC checking, etc.
  843. Which means that the _full_ amount of time to get to proper write mode with GC
  844. operational may actually be _longer_ than before. Sucks to be me. */
  845. /* Check the node CRC in any case. */
  846. crc = crc32(0, ri, sizeof(*ri)-8);
  847. if (crc != je32_to_cpu(ri->node_crc)) {
  848. printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on "
  849. "node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
  850. ofs, je32_to_cpu(ri->node_crc), crc);
  851. /*
  852. * We believe totlen because the CRC on the node
  853. * _header_ was OK, just the node itself failed.
  854. */
  855. return jffs2_scan_dirty_space(c, jeb,
  856. PAD(je32_to_cpu(ri->totlen)));
  857. }
  858. ic = jffs2_get_ino_cache(c, ino);
  859. if (!ic) {
  860. ic = jffs2_scan_make_ino_cache(c, ino);
  861. if (!ic)
  862. return -ENOMEM;
  863. }
  864. /* Wheee. It worked */
  865. jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);
  866. D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
  867. je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
  868. je32_to_cpu(ri->offset),
  869. je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));
  870. pseudo_random += je32_to_cpu(ri->version);
  871. if (jffs2_sum_active()) {
  872. jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
  873. }
  874. return 0;
  875. }
  876. static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  877. struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
  878. {
  879. struct jffs2_full_dirent *fd;
  880. struct jffs2_inode_cache *ic;
  881. uint32_t checkedlen;
  882. uint32_t crc;
  883. int err;
  884. D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));
  885. /* We don't get here unless the node is still valid, so we don't have to
  886. mask in the ACCURATE bit any more. */
  887. crc = crc32(0, rd, sizeof(*rd)-8);
  888. if (crc != je32_to_cpu(rd->node_crc)) {
  889. printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
  890. ofs, je32_to_cpu(rd->node_crc), crc);
  891. /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
  892. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
  893. return err;
  894. return 0;
  895. }
  896. pseudo_random += je32_to_cpu(rd->version);
  897. /* Should never happen. Did. (OLPC trac #4184)*/
  898. checkedlen = strnlen(rd->name, rd->nsize);
  899. if (checkedlen < rd->nsize) {
  900. printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n",
  901. ofs, checkedlen);
  902. }
  903. fd = jffs2_alloc_full_dirent(checkedlen+1);
  904. if (!fd) {
  905. return -ENOMEM;
  906. }
  907. memcpy(&fd->name, rd->name, checkedlen);
  908. fd->name[checkedlen] = 0;
  909. crc = crc32(0, fd->name, rd->nsize);
  910. if (crc != je32_to_cpu(rd->name_crc)) {
  911. printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
  912. ofs, je32_to_cpu(rd->name_crc), crc);
  913. D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
  914. jffs2_free_full_dirent(fd);
  915. /* FIXME: Why do we believe totlen? */
  916. /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
  917. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
  918. return err;
  919. return 0;
  920. }
  921. ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
  922. if (!ic) {
  923. jffs2_free_full_dirent(fd);
  924. return -ENOMEM;
  925. }
  926. fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd),
  927. PAD(je32_to_cpu(rd->totlen)), ic);
  928. fd->next = NULL;
  929. fd->version = je32_to_cpu(rd->version);
  930. fd->ino = je32_to_cpu(rd->ino);
  931. fd->nhash = full_name_hash(fd->name, checkedlen);
  932. fd->type = rd->type;
  933. jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
  934. if (jffs2_sum_active()) {
  935. jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
  936. }
  937. return 0;
  938. }
  939. static int count_list(struct list_head *l)
  940. {
  941. uint32_t count = 0;
  942. struct list_head *tmp;
  943. list_for_each(tmp, l) {
  944. count++;
  945. }
  946. return count;
  947. }
  948. /* Note: This breaks if list_empty(head). I don't care. You
  949. might, if you copy this code and use it elsewhere :) */
  950. static void rotate_list(struct list_head *head, uint32_t count)
  951. {
  952. struct list_head *n = head->next;
  953. list_del(head);
  954. while(count--) {
  955. n = n->next;
  956. }
  957. list_add(head, n);
  958. }
  959. void jffs2_rotate_lists(struct jffs2_sb_info *c)
  960. {
  961. uint32_t x;
  962. uint32_t rotateby;
  963. x = count_list(&c->clean_list);
  964. if (x) {
  965. rotateby = pseudo_random % x;
  966. rotate_list((&c->clean_list), rotateby);
  967. }
  968. x = count_list(&c->very_dirty_list);
  969. if (x) {
  970. rotateby = pseudo_random % x;
  971. rotate_list((&c->very_dirty_list), rotateby);
  972. }
  973. x = count_list(&c->dirty_list);
  974. if (x) {
  975. rotateby = pseudo_random % x;
  976. rotate_list((&c->dirty_list), rotateby);
  977. }
  978. x = count_list(&c->erasable_list);
  979. if (x) {
  980. rotateby = pseudo_random % x;
  981. rotate_list((&c->erasable_list), rotateby);
  982. }
  983. if (c->nr_erasing_blocks) {
  984. rotateby = pseudo_random % c->nr_erasing_blocks;
  985. rotate_list((&c->erase_pending_list), rotateby);
  986. }
  987. if (c->nr_free_blocks) {
  988. rotateby = pseudo_random % c->nr_free_blocks;
  989. rotate_list((&c->free_list), rotateby);
  990. }
  991. }