/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include "nodelist.h"
#include "summary.h"
#include "debug.h"

#define DEFAULT_EMPTY_SCAN_SIZE 1024

#define noisy_printk(noise, args...) do { \
	if (*(noise)) { \
		printk(KERN_NOTICE args); \
		(*(noise))--; \
		if (!(*(noise))) { \
			printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
		} \
	} \
} while(0)

static uint32_t pseudo_random;

static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s);

/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
 * Returning an error will abort the mount - bad checksums etc. should just mark the space
 * as dirty.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);
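
/* Minimum free space an eraseblock must have before the scan considers making
 * it the 'nextblock' to write into: room for at least two raw inodes, or, when
 * a write buffer is in use and nodes can't be marked obsolete in place, one
 * full wbuf page. */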
static inline int min_free(struct jffs2_sb_info *c)
{
	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
		return c->wbuf_pagesize;
#endif
	return min;
}
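
/* How many bytes of 0xFF at the start of an eraseblock we examine before
 * declaring the block empty: the whole sector for very small sectors,
 * otherwise DEFAULT_EMPTY_SCAN_SIZE. */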
static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
	if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
		return sector_size;
	else
		return DEFAULT_EMPTY_SCAN_SIZE;
}
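
/* Mark all remaining free space in 'jeb' as dirty, fold its wasted space into
 * the dirty accounting, and file the block on the dirty or very-dirty list as
 * appropriate. */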
static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	int ret;

	if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
		return ret;
	if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
		return ret;
	/* Turned wasted size into dirty, since we apparently
	   think it's recoverable now. */
	jeb->dirty_size += jeb->wasted_size;
	c->dirty_size += jeb->wasted_size;
	c->wasted_size -= jeb->wasted_size;
	jeb->wasted_size = 0;
	if (VERYDIRTY(c, jeb->dirty_size)) {
		list_add(&jeb->list, &c->very_dirty_list);
	} else {
		list_add(&jeb->list, &c->dirty_list);
	}
	return 0;
}
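
/* Scan the whole medium at mount time: map (or read) the flash, scan every
 * eraseblock, and sort each one onto the free, clean, dirty, erase-pending or
 * bad list according to what the scan found. Also selects the block with the
 * most free space as 'nextblock' for subsequent writes. */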
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
	struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
#ifndef __ECOS
	size_t pointlen;

	if (c->mtd->point) {
		ret = c->mtd->point(c->mtd, 0, c->mtd->size, &pointlen,
				    (void **)&flashbuf, NULL);
		if (!ret && pointlen < c->mtd->size) {
			/* Don't muck about if it won't let us point to the whole flash */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
			c->mtd->unpoint(c->mtd, 0, pointlen);
			flashbuf = NULL;
		}
		if (ret)
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
	}
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			buf_size = c->sector_size;
		else
			buf_size = PAGE_SIZE;

		/* Respect kmalloc limitations */
		if (buf_size > 128*1024)
			buf_size = 128*1024;

		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
		flashbuf = kmalloc(buf_size, GFP_KERNEL);
		if (!flashbuf)
			return -ENOMEM;
	}

	if (jffs2_sum_active()) {
		s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
		if (!s) {
			JFFS2_WARNING("Can't allocate memory for summary\n");
			ret = -ENOMEM;
			goto out;
		}
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		cond_resched();

		/* reset summary info for next eraseblock scan */
		jffs2_sum_reset_collected(s);

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
					    buf_size, s);

		if (ret < 0)
			goto out;

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block. Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again. It will be marked as such when the erase
			 * is complete. Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with most free space
			   and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					ret = file_dirty(c, c->nextblock);
					if (ret)
						goto out;
					/* deleting summary information of the old nextblock */
					jffs2_sum_reset_collected(c->summary);
				}
				/* update collected summary information for the current nextblock */
				jffs2_sum_move_collected(c, s);
				D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
				c->nextblock = jeb;
			} else {
				ret = file_dirty(c, jeb);
				if (ret)
					goto out;
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;

		default:
			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;

		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  skip));
		jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
		jffs2_scan_dirty_space(c, c->nextblock, skip);
	}
#endif
	if (c->nr_erasing_blocks) {
		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		spin_lock(&c->erase_completion_lock);
		jffs2_garbage_collect_trigger(c);
		spin_unlock(&c->erase_completion_lock);
	}
	ret = 0;
 out:
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, 0, c->mtd->size);
#endif
	if (s)
		kfree(s);
	return ret;
}
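
/* Read 'len' bytes from flash offset 'ofs' into 'buf', treating a short read
 * as an I/O error. */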
static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
			       uint32_t ofs, uint32_t len)
{
	int ret;
	size_t retlen;

	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
	if (ret) {
		D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
		return ret;
	}
	if (retlen < len) {
		D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
		return -EIO;
	}
	return 0;
}
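
/* Classify a scanned eraseblock: only a cleanmarker present, (almost) entirely
 * clean, partly dirty, or containing nothing valid at all. Blocks with only a
 * few bytes of dirty space are promoted to the clean state, with that space
 * re-accounted as wasted. */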
int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
	    && (!jeb->first_node || !ref_next(jeb->first_node)) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with max 4 byte dirty space to cleanlist */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;

		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}

#ifdef CONFIG_JFFS2_FS_XATTR
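/* Handle an xattr datum node found by the scan: verify the node CRC and total
 * length, then record the xdatum (or, if a newer version is already known,
 * just link the node ref behind it) and feed it to the summary collector. */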
static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_xattr *rx, uint32_t ofs,
				 struct jffs2_summary *s)
{
	struct jffs2_xattr_datum *xd;
	uint32_t xid, version, totlen, crc;
	int err;

	crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
	if (crc != je32_to_cpu(rx->node_crc)) {
		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
			      ofs, je32_to_cpu(rx->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	xid = je32_to_cpu(rx->xid);
	version = je32_to_cpu(rx->version);

	totlen = PAD(sizeof(struct jffs2_raw_xattr)
		     + rx->name_len + 1 + je16_to_cpu(rx->value_len));
	if (totlen != je32_to_cpu(rx->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
			      ofs, je32_to_cpu(rx->totlen), totlen);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	xd = jffs2_setup_xattr_datum(c, xid, version);
	if (IS_ERR(xd))
		return PTR_ERR(xd);

	if (xd->version > version) {
		struct jffs2_raw_node_ref *raw
			= jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
		raw->next_in_ino = xd->node->next_in_ino;
		xd->node->next_in_ino = raw;
	} else {
		xd->version = version;
		xd->xprefix = rx->xprefix;
		xd->name_len = rx->name_len;
		xd->value_len = je16_to_cpu(rx->value_len);
		xd->data_crc = je32_to_cpu(rx->data_crc);

		jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd);
	}

	if (jffs2_sum_active())
		jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
	dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
		  ofs, xd->xid, xd->version);
	return 0;
}
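
/* Handle an xattr reference node: verify CRC and length, then queue the ref on
 * the temporary c->xref_temp chain for jffs2_build_xattr_subsystem() to
 * resolve later, and feed it to the summary collector. */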
static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				struct jffs2_raw_xref *rr, uint32_t ofs,
				struct jffs2_summary *s)
{
	struct jffs2_xattr_ref *ref;
	uint32_t crc;
	int err;

	crc = crc32(0, rr, sizeof(*rr) - 4);
	if (crc != je32_to_cpu(rr->node_crc)) {
		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
			      ofs, je32_to_cpu(rr->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
			return err;
		return 0;
	}

	if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n",
			      ofs, je32_to_cpu(rr->totlen),
			      PAD(sizeof(struct jffs2_raw_xref)));
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
			return err;
		return 0;
	}

	ref = jffs2_alloc_xattr_ref();
	if (!ref)
		return -ENOMEM;

	/* Before jffs2_build_xattr_subsystem() is called, and after an
	 * xattr_ref is marked as a dead xref, ref->xid is used to store the
	 * 32bit xid (xd is not used) and ref->ino is used to store the 32bit
	 * inode number (ic is not used). Those fields are declared as a
	 * union, so their uses are mutually exclusive. In a similar way,
	 * ref->next is temporarily used to chain all xattr_ref objects; it is
	 * re-chained to the jffs2_inode_cache in jffs2_build_xattr_subsystem().
	 */
	ref->ino = je32_to_cpu(rr->ino);
	ref->xid = je32_to_cpu(rr->xid);
	ref->xseqno = je32_to_cpu(rr->xseqno);
	if (ref->xseqno > c->highest_xseqno)
		c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
	ref->next = c->xref_temp;
	c->xref_temp = ref;

	jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);

	if (jffs2_sum_active())
		jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
	dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n",
		  ofs, ref->xid, ref->ino);
	return 0;
}
#endif

/* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
   the flash, XIP-style */
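/* Scan a single eraseblock. If a valid summary node is found at the end of the
   block, use it; otherwise walk the block node by node, checking magic and
   header CRCs, accounting dirty space, and dispatching each node type to its
   handler. Returns a BLK_STATE_* classification or a negative error. */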
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (jffs2_cleanmarker_oob(c)) {
		int ret;

		if (c->mtd->block_isbad(c->mtd, jeb->offset))
			return BLK_STATE_BADBLOCK;

		ret = jffs2_check_nand_cleanmarker(c, jeb);
		D2(printk(KERN_NOTICE "jffs2_check_nand_cleanmarker returned %d\n", ret));

		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1:		break;
		default:	return ret;
		}
	}
#endif
	if (jffs2_sum_active()) {
		struct jffs2_sum_marker *sm;
		void *sumptr = NULL;
		uint32_t sumlen;

		if (!buf_size) {
			/* XIP case. Just look, point at the summary if it's there */
			sm = (void *)buf + c->sector_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumptr = buf + je32_to_cpu(sm->offset);
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
			}
		} else {
			/* If NAND flash, read a whole page of it. Else just the end */
			if (c->wbuf_pagesize)
				buf_len = c->wbuf_pagesize;
			else
				buf_len = sizeof(*sm);

			/* Read as much as we want into the _end_ of the preallocated buffer */
			err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
						  jeb->offset + c->sector_size - buf_len,
						  buf_len);
			if (err)
				return err;

			sm = (void *)buf + buf_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
				sumptr = buf + buf_size - sumlen;

				/* Now, make sure the summary itself is available */
				if (sumlen > buf_size) {
					/* Need to kmalloc for this. */
					sumptr = kmalloc(sumlen, GFP_KERNEL);
					if (!sumptr)
						return -ENOMEM;
					memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
				}
				if (buf_len < sumlen) {
					/* Need to read more so that the entire summary node is present */
					err = jffs2_fill_scan_buf(c, sumptr,
								  jeb->offset + c->sector_size - sumlen,
								  sumlen - buf_len);
					if (err)
						return err;
				}
			}
		}

		if (sumptr) {
			err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);

			if (buf_size && sumlen > buf_size)
				kfree(sumptr);
			/* If it returns with a real error, bail.
			   If it returns positive, that's a block classification
			   (i.e. BLK_STATE_xxx) so return that too.
			   If it returns zero, fall through to full scan. */
			if (err)
				return err;
		}
	}

	buf_ofs = jeb->offset;

	if (!buf_size) {
		/* This is the XIP case -- we're reading _directly_ from the flash chip */
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE(c->sector_size);
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;
	/* Scan only the first EMPTY_SCAN_SIZE bytes of 0xFF before declaring the block empty */
	while(ofs < EMPTY_SCAN_SIZE(c->sector_size) && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == EMPTY_SCAN_SIZE(c->sector_size)) {
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n", ret));
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1:		return BLK_STATE_ALLDIRTY;
			default:	return ret;
			}
		}
#endif
		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
		if (c->cleanmarker_size == 0)
			return BLK_STATE_CLEANMARKER;	/* don't bother with re-erase */
		else
			return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs));
		if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
			return err;
		if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
			return err;
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

	dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n", jeb->offset);

scan_more:
	while(ofs < jeb->offset + c->sector_size) {

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Make sure there are node refs available for use */
		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
		if (err)
			return err;

		cond_resched();

		if (ofs & 3) {
			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
			if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
				return err;
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs;
			uint32_t empty_start, scan_end;

			empty_start = ofs;
			ofs += 4;
			scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);

			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < scan_end) {
				if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
					       empty_start, ofs);
					if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
						return err;
					goto scan_more;
				}

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
				D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
				return BLK_STATE_CLEANMARKER;
			}
			if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
				scan_end = buf_len;
				goto more_empty;
			}

			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start));
				break;
			}
			/* point never reaches here */
			scan_end = buf_len;
			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
			       ofs, je32_to_cpu(node->totlen));
			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)"
					  " left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
		case JFFS2_NODETYPE_XREF:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)"
					  " left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
#endif	/* CONFIG_JFFS2_FS_XATTR */

		case JFFS2_NODETYPE_CLEANMARKER:
			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);

				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			if (jffs2_sum_active())
				jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY: {
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));

				jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);

				/* We can't summarise nodes we don't grok */
				jffs2_sum_disable_collecting(s);
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
				}
			}
		}
	}

	if (jffs2_sum_active()) {
		if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
			dbg_summary("There is not enough space for "
				    "summary information, disabling for this jeb!\n");
			jffs2_sum_disable_collecting(s);
		}
	}

	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	return jffs2_scan_classify_jeb(c, jeb);
}
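
/* Find the inode cache for 'ino', or allocate and register a fresh one if the
 * scan has not seen that inode number before. Returns NULL on allocation
 * failure. */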
struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inode_cache *ic;

	ic = jffs2_get_ino_cache(c, ino);
	if (ic)
		return ic;

	if (ino > c->highest_ino)
		c->highest_ino = ino;

	ic = jffs2_alloc_inode_cache();
	if (!ic) {
		printk(KERN_NOTICE "jffs2_scan_make_ino_cache(): allocation of inode cache failed\n");
		return NULL;
	}
	memset(ic, 0, sizeof(*ic));

	ic->ino = ino;
	ic->nodes = (void *)ic;
	jffs2_add_ino_cache(c, ic);
	if (ino == 1)
		ic->pino_nlink = 1;
	return ic;
}
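
/* Handle a data/inode node found by the full scan: check the node CRC, make
 * sure an inode cache exists for the inode it belongs to, and link the node in
 * as REF_UNCHECKED; the data CRC is verified later. */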
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
{
	struct jffs2_inode_cache *ic;
	uint32_t crc, ino = je32_to_cpu(ri->ino);

	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));

	/* We do very little here now. Just check the ino# to which we should attribute
	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
	   we used to scan the flash once only, reading everything we want from it into
	   memory, then building all our in-core data structures and freeing the extra
	   information. Now we allow the first part of the mount to complete a lot quicker,
	   but we have to go _back_ to the flash in order to finish the CRC checking, etc.
	   Which means that the _full_ amount of time to get to proper write mode with GC
	   operational may actually be _longer_ than before. Sucks to be me. */

	/* Check the node CRC in any case. */
	crc = crc32(0, ri, sizeof(*ri)-8);
	if (crc != je32_to_cpu(ri->node_crc)) {
		printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on "
		       "node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(ri->node_crc), crc);
		/*
		 * We believe totlen because the CRC on the node
		 * _header_ was OK, just the node itself failed.
		 */
		return jffs2_scan_dirty_space(c, jeb,
					      PAD(je32_to_cpu(ri->totlen)));
	}

	ic = jffs2_get_ino_cache(c, ino);
	if (!ic) {
		ic = jffs2_scan_make_ino_cache(c, ino);
		if (!ic)
			return -ENOMEM;
	}

	/* Wheee. It worked */
	jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);

	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
		  je32_to_cpu(ri->offset),
		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));

	pseudo_random += je32_to_cpu(ri->version);

	if (jffs2_sum_active()) {
		jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
	}

	return 0;
}
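
/* Handle a directory entry node: verify the node and name CRCs, build a
 * jffs2_full_dirent for it, and attach it to the scan-time dirent list of the
 * parent inode's cache entry. */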
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
{
	struct jffs2_full_dirent *fd;
	struct jffs2_inode_cache *ic;
	uint32_t checkedlen;
	uint32_t crc;
	int err;

	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));

	/* We don't get here unless the node is still valid, so we don't have to
	   mask in the ACCURATE bit any more. */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->node_crc), crc);
		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
			return err;
		return 0;
	}

	pseudo_random += je32_to_cpu(rd->version);

	/* Should never happen. Did. (OLPC trac #4184) */
	checkedlen = strnlen(rd->name, rd->nsize);
	if (checkedlen < rd->nsize) {
		printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n",
		       ofs, checkedlen);
	}
	fd = jffs2_alloc_full_dirent(checkedlen+1);
	if (!fd) {
		return -ENOMEM;
	}
	memcpy(&fd->name, rd->name, checkedlen);
	fd->name[checkedlen] = 0;

	crc = crc32(0, fd->name, rd->nsize);
	if (crc != je32_to_cpu(rd->name_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->name_crc), crc);
		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
		jffs2_free_full_dirent(fd);
		/* FIXME: Why do we believe totlen? */
		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
			return err;
		return 0;
	}
	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
	if (!ic) {
		jffs2_free_full_dirent(fd);
		return -ENOMEM;
	}

	fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd),
				      PAD(je32_to_cpu(rd->totlen)), ic);

	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(fd->name, checkedlen);
	fd->type = rd->type;
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	if (jffs2_sum_active()) {
		jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
	}

	return 0;
}
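
/* Count the entries on a block list; used below to pick a bounded random
 * rotation amount for each list. */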
static int count_list(struct list_head *l)
{
	uint32_t count = 0;
	struct list_head *tmp;

	list_for_each(tmp, l) {
		count++;
	}
	return count;
}

/* Note: This breaks if list_empty(head). I don't care. You
   might, if you copy this code and use it elsewhere :) */
static void rotate_list(struct list_head *head, uint32_t count)
{
	struct list_head *n = head->next;

	list_del(head);
	while(count--) {
		n = n->next;
	}
	list_add(head, n);
}
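
/* Rotate each of the block lists by a pseudo-random amount after the scan, so
 * that successive mounts do not always start picking blocks from the same
 * position on each list. */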
void jffs2_rotate_lists(struct jffs2_sb_info *c)
{
	uint32_t x;
	uint32_t rotateby;

	x = count_list(&c->clean_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->clean_list), rotateby);
	}

	x = count_list(&c->very_dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->very_dirty_list), rotateby);
	}

	x = count_list(&c->dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->dirty_list), rotateby);
	}

	x = count_list(&c->erasable_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->erasable_list), rotateby);
	}

	if (c->nr_erasing_blocks) {
		rotateby = pseudo_random % c->nr_erasing_blocks;
		rotate_list((&c->erase_pending_list), rotateby);
	}

	if (c->nr_free_blocks) {
		rotateby = pseudo_random % c->nr_free_blocks;
		rotate_list((&c->free_list), rotateby);
	}
}