  1. /*
  2. * JFFS2 -- Journalling Flash File System, Version 2.
  3. *
  4. * Copyright © 2001-2007 Red Hat, Inc.
  5. *
  6. * Created by David Woodhouse <dwmw2@infradead.org>
  7. *
  8. * For licensing information, see the file 'LICENCE' in this directory.
  9. *
  10. */
  11. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12. #include <linux/kernel.h>
  13. #include <linux/sched.h>
  14. #include <linux/slab.h>
  15. #include <linux/mtd/mtd.h>
  16. #include <linux/pagemap.h>
  17. #include <linux/crc32.h>
  18. #include <linux/compiler.h>
  19. #include "nodelist.h"
  20. #include "summary.h"
  21. #include "debug.h"
  22. #define DEFAULT_EMPTY_SCAN_SIZE 256
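/*
 * noisy_printk() throttles repeated complaints while scanning a single
 * eraseblock: the caller keeps a small 'noise' budget (jffs2_scan_eraseblock()
 * resets it to 10 per block) and each call decrements it, emitting one final
 * "no more messages" notice when the budget runs out.  A minimal usage sketch,
 * mirroring the magic-bitmask check further down (the message text here is
 * only illustrative):
 *
 *     int noise = 10;
 *     noisy_printk(&noise, "unexpected data at 0x%08x\n", ofs);
 */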
  23. #define noisy_printk(noise, fmt, ...) \
  24. do { \
  25. if (*(noise)) { \
  26. pr_notice(fmt, ##__VA_ARGS__); \
  27. (*(noise))--; \
  28. if (!(*(noise))) \
  29. pr_notice("Further such events for this erase block will not be printed\n"); \
  30. } \
  31. } while (0)
  32. static uint32_t pseudo_random;
  33. static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  34. unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s);
  35. /* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
  36. * Returning an error will abort the mount - bad checksums etc. should just mark the space
  37. * as dirty.
  38. */
  39. static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  40. struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
  41. static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  42. struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);
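/*
 * min_free() is the smallest amount of free space that makes a partially
 * dirty block worth keeping as 'nextblock' in jffs2_scan_medium(): room for
 * two raw inode node headers or, on write-buffered flash where nodes cannot
 * be marked obsolete in place, at least one write-buffer page.
 */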
  43. static inline int min_free(struct jffs2_sb_info *c)
  44. {
  45. uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
  46. #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
  47. if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
  48. return c->wbuf_pagesize;
  49. #endif
  50. return min;
  51. }
  52. static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
  53. if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
  54. return sector_size;
  55. else
  56. return DEFAULT_EMPTY_SCAN_SIZE;
  57. }
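/*
 * Give up on writing to this block: account whatever free space remains as
 * dirty, reclassify any wasted space as dirty too, and file the block on the
 * dirty or very_dirty list as appropriate.
 */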
  58. static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
  59. {
  60. int ret;
  61. if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
  62. return ret;
  63. if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
  64. return ret;
  65. /* Turn wasted size into dirty, since we apparently
  66. think it's recoverable now. */
  67. jeb->dirty_size += jeb->wasted_size;
  68. c->dirty_size += jeb->wasted_size;
  69. c->wasted_size -= jeb->wasted_size;
  70. jeb->wasted_size = 0;
  71. if (VERYDIRTY(c, jeb->dirty_size)) {
  72. list_add(&jeb->list, &c->very_dirty_list);
  73. } else {
  74. list_add(&jeb->list, &c->dirty_list);
  75. }
  76. return 0;
  77. }
  78. int jffs2_scan_medium(struct jffs2_sb_info *c)
  79. {
  80. int i, ret;
  81. uint32_t empty_blocks = 0, bad_blocks = 0;
  82. unsigned char *flashbuf = NULL;
  83. uint32_t buf_size = 0;
  84. struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
  85. #ifndef __ECOS
  86. size_t pointlen, try_size;
  87. ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
  88. (void **)&flashbuf, NULL);
  89. if (!ret && pointlen < c->mtd->size) {
  90. /* Don't muck about if it won't let us point to the whole flash */
  91. jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
  92. pointlen);
  93. mtd_unpoint(c->mtd, 0, pointlen);
  94. flashbuf = NULL;
  95. }
  96. if (ret && ret != -EOPNOTSUPP)
  97. jffs2_dbg(1, "MTD point failed %d\n", ret);
  98. #endif
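/*
 * Two scanning modes from here on: if mtd_point() mapped the whole device,
 * 'flashbuf' addresses the flash directly and buf_size stays 0 (the XIP-style
 * case jffs2_scan_eraseblock() tests for); otherwise fall back to a bounce
 * buffer of up to one eraseblock (one page when not using OOB cleanmarkers)
 * and read through it.
 */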
  99. if (!flashbuf) {
  100. /* For NAND it's quicker to read a whole eraseblock at a time,
  101. apparently */
  102. if (jffs2_cleanmarker_oob(c))
  103. try_size = c->sector_size;
  104. else
  105. try_size = PAGE_SIZE;
  106. jffs2_dbg(1, "Trying to allocate readbuf of %zu "
  107. "bytes\n", try_size);
  108. flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
  109. if (!flashbuf)
  110. return -ENOMEM;
  111. jffs2_dbg(1, "Allocated readbuf of %zu bytes\n",
  112. try_size);
  113. buf_size = (uint32_t)try_size;
  114. }
  115. if (jffs2_sum_active()) {
  116. s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
  117. if (!s) {
  118. JFFS2_WARNING("Can't allocate memory for summary\n");
  119. ret = -ENOMEM;
  120. goto out;
  121. }
  122. }
  123. for (i=0; i<c->nr_blocks; i++) {
  124. struct jffs2_eraseblock *jeb = &c->blocks[i];
  125. cond_resched();
  126. /* reset summary info for next eraseblock scan */
  127. jffs2_sum_reset_collected(s);
  128. ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
  129. buf_size, s);
  130. if (ret < 0)
  131. goto out;
  132. jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
  133. /* Now decide which list to put it on */
  134. switch(ret) {
  135. case BLK_STATE_ALLFF:
  136. /*
  137. * Empty block. Since we can't be sure it
  138. * was entirely erased, we just queue it for erase
  139. * again. It will be marked as such when the erase
  140. * is complete. Meanwhile we still count it as empty
  141. * for later checks.
  142. */
  143. empty_blocks++;
  144. list_add(&jeb->list, &c->erase_pending_list);
  145. c->nr_erasing_blocks++;
  146. break;
  147. case BLK_STATE_CLEANMARKER:
  148. /* Only a CLEANMARKER node is valid */
  149. if (!jeb->dirty_size) {
  150. /* It's actually free */
  151. list_add(&jeb->list, &c->free_list);
  152. c->nr_free_blocks++;
  153. } else {
  154. /* Dirt */
  155. jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n",
  156. jeb->offset);
  157. list_add(&jeb->list, &c->erase_pending_list);
  158. c->nr_erasing_blocks++;
  159. }
  160. break;
  161. case BLK_STATE_CLEAN:
  162. /* Full (or almost full) of clean data. Clean list */
  163. list_add(&jeb->list, &c->clean_list);
  164. break;
  165. case BLK_STATE_PARTDIRTY:
  166. /* Some data, but not full. Dirty list. */
  167. /* We want to remember the block with most free space
  168. and stick it in the 'nextblock' position to start writing to it. */
  169. if (jeb->free_size > min_free(c) &&
  170. (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
  171. /* Better candidate for the next writes to go to */
  172. if (c->nextblock) {
  173. ret = file_dirty(c, c->nextblock);
  174. if (ret)
  175. goto out;
  176. /* deleting summary information of the old nextblock */
  177. jffs2_sum_reset_collected(c->summary);
  178. }
  179. /* update collected summary information for the current nextblock */
  180. jffs2_sum_move_collected(c, s);
  181. jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
  182. __func__, jeb->offset);
  183. c->nextblock = jeb;
  184. } else {
  185. ret = file_dirty(c, jeb);
  186. if (ret)
  187. goto out;
  188. }
  189. break;
  190. case BLK_STATE_ALLDIRTY:
  191. /* Nothing valid - not even a clean marker. Needs erasing. */
  192. /* For now we just put it on the erasing list. We'll start the erases later */
  193. jffs2_dbg(1, "Erase block at 0x%08x is not formatted. It will be erased\n",
  194. jeb->offset);
  195. list_add(&jeb->list, &c->erase_pending_list);
  196. c->nr_erasing_blocks++;
  197. break;
  198. case BLK_STATE_BADBLOCK:
  199. jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset);
  200. list_add(&jeb->list, &c->bad_list);
  201. c->bad_size += c->sector_size;
  202. c->free_size -= c->sector_size;
  203. bad_blocks++;
  204. break;
  205. default:
  206. pr_warn("%s(): unknown block state\n", __func__);
  207. BUG();
  208. }
  209. }
  210. /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
  211. if (c->nextblock && (c->nextblock->dirty_size)) {
  212. c->nextblock->wasted_size += c->nextblock->dirty_size;
  213. c->wasted_size += c->nextblock->dirty_size;
  214. c->dirty_size -= c->nextblock->dirty_size;
  215. c->nextblock->dirty_size = 0;
  216. }
  217. #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
  218. if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
  219. /* If we're going to start writing into a block which already
  220. contains data, and the end of the data isn't page-aligned,
  221. skip a little and align it. */
  222. uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;
  223. jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
  224. __func__, skip);
  225. jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
  226. jffs2_scan_dirty_space(c, c->nextblock, skip);
  227. }
  228. #endif
  229. if (c->nr_erasing_blocks) {
  230. if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
  231. pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
  232. pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
  233. empty_blocks, bad_blocks, c->nr_blocks);
  234. ret = -EIO;
  235. goto out;
  236. }
  237. spin_lock(&c->erase_completion_lock);
  238. jffs2_garbage_collect_trigger(c);
  239. spin_unlock(&c->erase_completion_lock);
  240. }
  241. ret = 0;
  242. out:
  243. if (buf_size)
  244. kfree(flashbuf);
  245. #ifndef __ECOS
  246. else
  247. mtd_unpoint(c->mtd, 0, c->mtd->size);
  248. #endif
  249. kfree(s);
  250. return ret;
  251. }
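/*
 * Read 'len' bytes at flash offset 'ofs' into 'buf'.  A short read is treated
 * as -EIO, so callers can rely on the buffer being fully populated on success.
 */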
  252. static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
  253. uint32_t ofs, uint32_t len)
  254. {
  255. int ret;
  256. size_t retlen;
  257. ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
  258. if (ret) {
  259. jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n",
  260. len, ofs, ret);
  261. return ret;
  262. }
  263. if (retlen < len) {
  264. jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n",
  265. ofs, retlen);
  266. return -EIO;
  267. }
  268. return 0;
  269. }
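/*
 * Classify a freshly scanned block from its accounting totals: only a padded
 * cleanmarker present and nothing else -> BLK_STATE_CLEANMARKER; an amount of
 * dirty space too small to be worth recovering -> fold it into wasted space
 * and call the block CLEAN; some used or unchecked space -> PARTDIRTY;
 * nothing valid at all -> ALLDIRTY.
 */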
  270. int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
  271. {
  272. if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
  273. && (!jeb->first_node || !ref_next(jeb->first_node)) )
  274. return BLK_STATE_CLEANMARKER;
  275. /* move blocks with max 4 byte dirty space to cleanlist */
  276. else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
  277. c->dirty_size -= jeb->dirty_size;
  278. c->wasted_size += jeb->dirty_size;
  279. jeb->wasted_size += jeb->dirty_size;
  280. jeb->dirty_size = 0;
  281. return BLK_STATE_CLEAN;
  282. } else if (jeb->used_size || jeb->unchecked_size)
  283. return BLK_STATE_PARTDIRTY;
  284. else
  285. return BLK_STATE_ALLDIRTY;
  286. }
  287. #ifdef CONFIG_JFFS2_FS_XATTR
  288. static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  289. struct jffs2_raw_xattr *rx, uint32_t ofs,
  290. struct jffs2_summary *s)
  291. {
  292. struct jffs2_xattr_datum *xd;
  293. uint32_t xid, version, totlen, crc;
  294. int err;
  295. crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
  296. if (crc != je32_to_cpu(rx->node_crc)) {
  297. JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
  298. ofs, je32_to_cpu(rx->node_crc), crc);
  299. if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
  300. return err;
  301. return 0;
  302. }
  303. xid = je32_to_cpu(rx->xid);
  304. version = je32_to_cpu(rx->version);
  305. totlen = PAD(sizeof(struct jffs2_raw_xattr)
  306. + rx->name_len + 1 + je16_to_cpu(rx->value_len));
  307. if (totlen != je32_to_cpu(rx->totlen)) {
  308. JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
  309. ofs, je32_to_cpu(rx->totlen), totlen);
  310. if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
  311. return err;
  312. return 0;
  313. }
  314. xd = jffs2_setup_xattr_datum(c, xid, version);
  315. if (IS_ERR(xd))
  316. return PTR_ERR(xd);
  317. if (xd->version > version) {
  318. struct jffs2_raw_node_ref *raw
  319. = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
  320. raw->next_in_ino = xd->node->next_in_ino;
  321. xd->node->next_in_ino = raw;
  322. } else {
  323. xd->version = version;
  324. xd->xprefix = rx->xprefix;
  325. xd->name_len = rx->name_len;
  326. xd->value_len = je16_to_cpu(rx->value_len);
  327. xd->data_crc = je32_to_cpu(rx->data_crc);
  328. jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd);
  329. }
  330. if (jffs2_sum_active())
  331. jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
  332. dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
  333. ofs, xd->xid, xd->version);
  334. return 0;
  335. }
  336. static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  337. struct jffs2_raw_xref *rr, uint32_t ofs,
  338. struct jffs2_summary *s)
  339. {
  340. struct jffs2_xattr_ref *ref;
  341. uint32_t crc;
  342. int err;
  343. crc = crc32(0, rr, sizeof(*rr) - 4);
  344. if (crc != je32_to_cpu(rr->node_crc)) {
  345. JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
  346. ofs, je32_to_cpu(rr->node_crc), crc);
  347. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
  348. return err;
  349. return 0;
  350. }
  351. if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
  352. JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n",
  353. ofs, je32_to_cpu(rr->totlen),
  354. PAD(sizeof(struct jffs2_raw_xref)));
  355. if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
  356. return err;
  357. return 0;
  358. }
  359. ref = jffs2_alloc_xattr_ref();
  360. if (!ref)
  361. return -ENOMEM;
  362. /* Until jffs2_build_xattr_subsystem() is called, and after an
  363. * xattr_ref has been marked as a dead xref,
  364. * ref->xid holds the 32-bit xid (xd is not used) and
  365. * ref->ino holds the 32-bit inode number (ic is not used).
  366. * These fields are declared as a union, so their uses are
  367. * mutually exclusive. Similarly, ref->next is temporarily used
  368. * to chain all xattr_ref objects together; they are re-chained to
  369. * their jffs2_inode_cache in jffs2_build_xattr_subsystem().
  370. */
  371. ref->ino = je32_to_cpu(rr->ino);
  372. ref->xid = je32_to_cpu(rr->xid);
  373. ref->xseqno = je32_to_cpu(rr->xseqno);
  374. if (ref->xseqno > c->highest_xseqno)
  375. c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
  376. ref->next = c->xref_temp;
  377. c->xref_temp = ref;
  378. jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);
  379. if (jffs2_sum_active())
  380. jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
  381. dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n",
  382. ofs, ref->xid, ref->ino);
  383. return 0;
  384. }
  385. #endif
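/*
 * Full scan of a single eraseblock.  In outline: check for (and honour) an
 * on-flash summary node at the end of the block; failing that, walk the block
 * from the start, validating each node header's magic and CRC and dispatching
 * on nodetype (inode, dirent, xattr/xref, cleanmarker, padding), while
 * accounting every byte as used, dirty, unchecked or free.  Returns a
 * BLK_STATE_* classification or a negative error.
 */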
  386. /* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
  387. the flash, XIP-style */
  388. static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  389. unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
  390. struct jffs2_unknown_node *node;
  391. struct jffs2_unknown_node crcnode;
  392. uint32_t ofs, prevofs, max_ofs;
  393. uint32_t hdr_crc, buf_ofs, buf_len;
  394. int err;
  395. int noise = 0;
  396. #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
  397. int cleanmarkerfound = 0;
  398. #endif
  399. ofs = jeb->offset;
  400. prevofs = jeb->offset - 1;
  401. jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs);
  402. #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
  403. if (jffs2_cleanmarker_oob(c)) {
  404. int ret;
  405. if (mtd_block_isbad(c->mtd, jeb->offset))
  406. return BLK_STATE_BADBLOCK;
  407. ret = jffs2_check_nand_cleanmarker(c, jeb);
  408. jffs2_dbg(2, "jffs2_check_nand_cleanmarker returned %d\n", ret);
  409. /* Even if it's not found, we still scan to see
  410. if the block is empty. We use this information
  411. to decide whether to erase it or not. */
  412. switch (ret) {
  413. case 0: cleanmarkerfound = 1; break;
  414. case 1: break;
  415. default: return ret;
  416. }
  417. }
  418. #endif
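/*
 * With summary support enabled, look for a summary marker at the very end of
 * the block first: a valid summary lets jffs2_sum_scan_sumnode() reconstruct
 * the block's contents without the node-by-node scan below.
 */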
  419. if (jffs2_sum_active()) {
  420. struct jffs2_sum_marker *sm;
  421. void *sumptr = NULL;
  422. uint32_t sumlen;
  423. if (!buf_size) {
  424. /* XIP case. Just look, point at the summary if it's there */
  425. sm = (void *)buf + c->sector_size - sizeof(*sm);
  426. if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
  427. sumptr = buf + je32_to_cpu(sm->offset);
  428. sumlen = c->sector_size - je32_to_cpu(sm->offset);
  429. }
  430. } else {
  431. /* If NAND flash, read a whole page of it. Else just the end */
  432. if (c->wbuf_pagesize)
  433. buf_len = c->wbuf_pagesize;
  434. else
  435. buf_len = sizeof(*sm);
  436. /* Read as much as we want into the _end_ of the preallocated buffer */
  437. err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
  438. jeb->offset + c->sector_size - buf_len,
  439. buf_len);
  440. if (err)
  441. return err;
  442. sm = (void *)buf + buf_size - sizeof(*sm);
  443. if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
  444. sumlen = c->sector_size - je32_to_cpu(sm->offset);
  445. sumptr = buf + buf_size - sumlen;
  446. /* Now, make sure the summary itself is available */
  447. if (sumlen > buf_size) {
  448. /* Need to kmalloc for this. */
  449. sumptr = kmalloc(sumlen, GFP_KERNEL);
  450. if (!sumptr)
  451. return -ENOMEM;
  452. memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
  453. }
  454. if (buf_len < sumlen) {
  455. /* Need to read more so that the entire summary node is present */
  456. err = jffs2_fill_scan_buf(c, sumptr,
  457. jeb->offset + c->sector_size - sumlen,
  458. sumlen - buf_len);
  459. if (err)
  460. return err;
  461. }
  462. }
  463. }
  464. if (sumptr) {
  465. err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);
  466. if (buf_size && sumlen > buf_size)
  467. kfree(sumptr);
  468. /* If it returns with a real error, bail.
  469. If it returns positive, that's a block classification
  470. (i.e. BLK_STATE_xxx) so return that too.
  471. If it returns zero, fall through to full scan. */
  472. if (err)
  473. return err;
  474. }
  475. }
  476. buf_ofs = jeb->offset;
  477. if (!buf_size) {
  478. /* This is the XIP case -- we're reading _directly_ from the flash chip */
  479. buf_len = c->sector_size;
  480. } else {
  481. buf_len = EMPTY_SCAN_SIZE(c->sector_size);
  482. err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
  483. if (err)
  484. return err;
  485. }
  486. /* We temporarily use 'ofs' as a pointer into the buffer/jeb */
  487. ofs = 0;
  488. max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
  489. /* Scan only EMPTY_SCAN_SIZE of 0xFF before declaring it's empty */
  490. while(ofs < max_ofs && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
  491. ofs += 4;
  492. if (ofs == max_ofs) {
  493. #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
  494. if (jffs2_cleanmarker_oob(c)) {
  495. /* scan oob, take care of cleanmarker */
  496. int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
  497. jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n",
  498. ret);
  499. switch (ret) {
  500. case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
  501. case 1: return BLK_STATE_ALLDIRTY;
  502. default: return ret;
  503. }
  504. }
  505. #endif
  506. jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n",
  507. jeb->offset);
  508. if (c->cleanmarker_size == 0)
  509. return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */
  510. else
  511. return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */
  512. }
  513. if (ofs) {
  514. jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset,
  515. jeb->offset + ofs);
  516. if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
  517. return err;
  518. if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
  519. return err;
  520. }
  521. /* Now ofs is a complete physical flash offset as it always was... */
  522. ofs += jeb->offset;
  523. noise = 10;
  524. dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);
  525. scan_more:
  526. while(ofs < jeb->offset + c->sector_size) {
  527. jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
  528. /* Make sure there are node refs available for use */
  529. err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
  530. if (err)
  531. return err;
  532. cond_resched();
  533. if (ofs & 3) {
  534. pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs);
  535. ofs = PAD(ofs);
  536. continue;
  537. }
  538. if (ofs == prevofs) {
  539. pr_warn("ofs 0x%08x has already been seen. Skipping\n",
  540. ofs);
  541. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  542. return err;
  543. ofs += 4;
  544. continue;
  545. }
  546. prevofs = ofs;
  547. if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
  548. jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n",
  549. sizeof(struct jffs2_unknown_node),
  550. jeb->offset, c->sector_size, ofs,
  551. sizeof(*node));
  552. if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
  553. return err;
  554. break;
  555. }
  556. if (buf_ofs + buf_len < ofs + sizeof(*node)) {
  557. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  558. jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
  559. sizeof(struct jffs2_unknown_node),
  560. buf_len, ofs);
  561. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  562. if (err)
  563. return err;
  564. buf_ofs = ofs;
  565. }
  566. node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];
  567. if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
  568. uint32_t inbuf_ofs;
  569. uint32_t empty_start, scan_end;
  570. empty_start = ofs;
  571. ofs += 4;
  572. scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);
  573. jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs);
  574. more_empty:
  575. inbuf_ofs = ofs - buf_ofs;
  576. while (inbuf_ofs < scan_end) {
  577. if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
  578. pr_warn("Empty flash at 0x%08x ends at 0x%08x\n",
  579. empty_start, ofs);
  580. if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
  581. return err;
  582. goto scan_more;
  583. }
  584. inbuf_ofs+=4;
  585. ofs += 4;
  586. }
  587. /* Ran off end. */
  588. jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n",
  589. ofs);
  590. /* If we're only checking the beginning of a block with a cleanmarker,
  591. bail now */
  592. if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
  593. c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
  594. jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n",
  595. EMPTY_SCAN_SIZE(c->sector_size));
  596. return BLK_STATE_CLEANMARKER;
  597. }
  598. if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
  599. scan_end = buf_len;
  600. goto more_empty;
  601. }
  602. /* See how much more there is to read in this eraseblock... */
  603. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  604. if (!buf_len) {
  605. /* No more to read. Break out of main loop without marking
  606. this range of empty space as dirty (because it's not) */
  607. jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n",
  608. empty_start);
  609. break;
  610. }
  611. /* The XIP/point (buf_size == 0) case never reaches this far */
  612. scan_end = buf_len;
  613. jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n",
  614. buf_len, ofs);
  615. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  616. if (err)
  617. return err;
  618. buf_ofs = ofs;
  619. goto more_empty;
  620. }
  621. if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
  622. pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n",
  623. ofs);
  624. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  625. return err;
  626. ofs += 4;
  627. continue;
  628. }
  629. if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
  630. jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs);
  631. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  632. return err;
  633. ofs += 4;
  634. continue;
  635. }
  636. if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
  637. pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs);
  638. pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n");
  639. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  640. return err;
  641. ofs += 4;
  642. continue;
  643. }
  644. if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
  645. /* OK. We're out of possibilities. Whinge and move on */
  646. noisy_printk(&noise, "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
  647. __func__,
  648. JFFS2_MAGIC_BITMASK, ofs,
  649. je16_to_cpu(node->magic));
  650. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  651. return err;
  652. ofs += 4;
  653. continue;
  654. }
  655. /* We seem to have a node of sorts. Check the CRC */
  656. crcnode.magic = node->magic;
  657. crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
  658. crcnode.totlen = node->totlen;
  659. hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);
  660. if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
  661. noisy_printk(&noise, "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
  662. __func__,
  663. ofs, je16_to_cpu(node->magic),
  664. je16_to_cpu(node->nodetype),
  665. je32_to_cpu(node->totlen),
  666. je32_to_cpu(node->hdr_crc),
  667. hdr_crc);
  668. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  669. return err;
  670. ofs += 4;
  671. continue;
  672. }
  673. if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
  674. /* Eep. Node goes over the end of the erase block. */
  675. pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
  676. ofs, je32_to_cpu(node->totlen));
  677. pr_warn("Perhaps the file system was created with the wrong erase size?\n");
  678. if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
  679. return err;
  680. ofs += 4;
  681. continue;
  682. }
  683. if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
  684. /* Wheee. This is an obsoleted node */
  685. jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n",
  686. ofs);
  687. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
  688. return err;
  689. ofs += PAD(je32_to_cpu(node->totlen));
  690. continue;
  691. }
  692. switch(je16_to_cpu(node->nodetype)) {
  693. case JFFS2_NODETYPE_INODE:
  694. if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
  695. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  696. jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
  697. sizeof(struct jffs2_raw_inode),
  698. buf_len, ofs);
  699. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  700. if (err)
  701. return err;
  702. buf_ofs = ofs;
  703. node = (void *)buf;
  704. }
  705. err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
  706. if (err) return err;
  707. ofs += PAD(je32_to_cpu(node->totlen));
  708. break;
  709. case JFFS2_NODETYPE_DIRENT:
  710. if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
  711. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  712. jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
  713. je32_to_cpu(node->totlen), buf_len,
  714. ofs);
  715. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  716. if (err)
  717. return err;
  718. buf_ofs = ofs;
  719. node = (void *)buf;
  720. }
  721. err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
  722. if (err) return err;
  723. ofs += PAD(je32_to_cpu(node->totlen));
  724. break;
  725. #ifdef CONFIG_JFFS2_FS_XATTR
  726. case JFFS2_NODETYPE_XATTR:
  727. if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
  728. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  729. jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n",
  730. je32_to_cpu(node->totlen), buf_len,
  731. ofs);
  732. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  733. if (err)
  734. return err;
  735. buf_ofs = ofs;
  736. node = (void *)buf;
  737. }
  738. err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
  739. if (err)
  740. return err;
  741. ofs += PAD(je32_to_cpu(node->totlen));
  742. break;
  743. case JFFS2_NODETYPE_XREF:
  744. if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
  745. buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
  746. jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n",
  747. je32_to_cpu(node->totlen), buf_len,
  748. ofs);
  749. err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
  750. if (err)
  751. return err;
  752. buf_ofs = ofs;
  753. node = (void *)buf;
  754. }
  755. err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
  756. if (err)
  757. return err;
  758. ofs += PAD(je32_to_cpu(node->totlen));
  759. break;
  760. #endif /* CONFIG_JFFS2_FS_XATTR */
  761. case JFFS2_NODETYPE_CLEANMARKER:
  762. jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs);
  763. if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
  764. pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
  765. ofs, je32_to_cpu(node->totlen),
  766. c->cleanmarker_size);
  767. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
  768. return err;
  769. ofs += PAD(sizeof(struct jffs2_unknown_node));
  770. } else if (jeb->first_node) {
  771. pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n",
  772. ofs, jeb->offset);
  773. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
  774. return err;
  775. ofs += PAD(sizeof(struct jffs2_unknown_node));
  776. } else {
  777. jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);
  778. ofs += PAD(c->cleanmarker_size);
  779. }
  780. break;
  781. case JFFS2_NODETYPE_PADDING:
  782. if (jffs2_sum_active())
  783. jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
  784. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
  785. return err;
  786. ofs += PAD(je32_to_cpu(node->totlen));
  787. break;
  788. default:
  789. switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
  790. case JFFS2_FEATURE_ROCOMPAT:
  791. pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n",
  792. je16_to_cpu(node->nodetype), ofs);
  793. c->flags |= JFFS2_SB_FLAG_RO;
  794. if (!(jffs2_is_readonly(c)))
  795. return -EROFS;
  796. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
  797. return err;
  798. ofs += PAD(je32_to_cpu(node->totlen));
  799. break;
  800. case JFFS2_FEATURE_INCOMPAT:
  801. pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n",
  802. je16_to_cpu(node->nodetype), ofs);
  803. return -EINVAL;
  804. case JFFS2_FEATURE_RWCOMPAT_DELETE:
  805. jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
  806. je16_to_cpu(node->nodetype), ofs);
  807. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
  808. return err;
  809. ofs += PAD(je32_to_cpu(node->totlen));
  810. break;
  811. case JFFS2_FEATURE_RWCOMPAT_COPY: {
  812. jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
  813. je16_to_cpu(node->nodetype), ofs);
  814. jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);
  815. /* We can't summarise nodes we don't grok */
  816. jffs2_sum_disable_collecting(s);
  817. ofs += PAD(je32_to_cpu(node->totlen));
  818. break;
  819. }
  820. }
  821. }
  822. }
  823. if (jffs2_sum_active()) {
  824. if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
  825. dbg_summary("There is not enough space for "
  826. "summary information, disabling for this jeb!\n");
  827. jffs2_sum_disable_collecting(s);
  828. }
  829. }
  830. jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
  831. jeb->offset, jeb->free_size, jeb->dirty_size,
  832. jeb->unchecked_size, jeb->used_size, jeb->wasted_size);
  833. /* mark_node_obsolete can add to wasted !! */
  834. if (jeb->wasted_size) {
  835. jeb->dirty_size += jeb->wasted_size;
  836. c->dirty_size += jeb->wasted_size;
  837. c->wasted_size -= jeb->wasted_size;
  838. jeb->wasted_size = 0;
  839. }
  840. return jffs2_scan_classify_jeb(c, jeb);
  841. }
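/*
 * Find or create the inode cache entry for 'ino'.  A freshly created entry
 * starts with ic->nodes pointing back at the cache itself (an empty node
 * chain); only the root directory (ino #1) gets pino_nlink set here.
 */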
  842. struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
  843. {
  844. struct jffs2_inode_cache *ic;
  845. ic = jffs2_get_ino_cache(c, ino);
  846. if (ic)
  847. return ic;
  848. if (ino > c->highest_ino)
  849. c->highest_ino = ino;
  850. ic = jffs2_alloc_inode_cache();
  851. if (!ic) {
  852. pr_notice("%s(): allocation of inode cache failed\n", __func__);
  853. return NULL;
  854. }
  855. memset(ic, 0, sizeof(*ic));
  856. ic->ino = ino;
  857. ic->nodes = (void *)ic;
  858. jffs2_add_ino_cache(c, ic);
  859. if (ino == 1)
  860. ic->pino_nlink = 1;
  861. return ic;
  862. }
  863. static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  864. struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
  865. {
  866. struct jffs2_inode_cache *ic;
  867. uint32_t crc, ino = je32_to_cpu(ri->ino);
  868. jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
  869. /* We do very little here now. Just check the ino# to which we should attribute
  870. this node; we can do all the CRC checking etc. later. There's a tradeoff here --
  871. we used to scan the flash once only, reading everything we want from it into
  872. memory, then building all our in-core data structures and freeing the extra
  873. information. Now we allow the first part of the mount to complete a lot quicker,
  874. but we have to go _back_ to the flash in order to finish the CRC checking, etc.
  875. Which means that the _full_ amount of time to get to proper write mode with GC
  876. operational may actually be _longer_ than before. Sucks to be me. */
  877. /* Check the node CRC in any case. */
  878. crc = crc32(0, ri, sizeof(*ri)-8);
  879. if (crc != je32_to_cpu(ri->node_crc)) {
  880. pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
  881. __func__, ofs, je32_to_cpu(ri->node_crc), crc);
  882. /*
  883. * We believe totlen because the CRC on the node
  884. * _header_ was OK, just the node itself failed.
  885. */
  886. return jffs2_scan_dirty_space(c, jeb,
  887. PAD(je32_to_cpu(ri->totlen)));
  888. }
  889. ic = jffs2_get_ino_cache(c, ino);
  890. if (!ic) {
  891. ic = jffs2_scan_make_ino_cache(c, ino);
  892. if (!ic)
  893. return -ENOMEM;
  894. }
  895. /* Wheee. It worked */
  896. jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);
  897. jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
  898. je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
  899. je32_to_cpu(ri->offset),
  900. je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize));
  901. pseudo_random += je32_to_cpu(ri->version);
  902. if (jffs2_sum_active()) {
  903. jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
  904. }
  905. return 0;
  906. }
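/*
 * Dirent nodes carry the name in-line, so both the node CRC and the name CRC
 * are checked here; a jffs2_full_dirent is then attached to the parent
 * inode's scan_dents list for the later directory build.
 */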
  907. static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  908. struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
  909. {
  910. struct jffs2_full_dirent *fd;
  911. struct jffs2_inode_cache *ic;
  912. uint32_t checkedlen;
  913. uint32_t crc;
  914. int err;
  915. jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
  916. /* We don't get here unless the node is still valid, so we don't have to
  917. mask in the ACCURATE bit any more. */
  918. crc = crc32(0, rd, sizeof(*rd)-8);
  919. if (crc != je32_to_cpu(rd->node_crc)) {
  920. pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
  921. __func__, ofs, je32_to_cpu(rd->node_crc), crc);
  922. /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
  923. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
  924. return err;
  925. return 0;
  926. }
  927. pseudo_random += je32_to_cpu(rd->version);
  928. /* Should never happen. Did. (OLPC trac #4184)*/
  929. checkedlen = strnlen(rd->name, rd->nsize);
  930. if (checkedlen < rd->nsize) {
  931. pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
  932. ofs, checkedlen);
  933. }
  934. fd = jffs2_alloc_full_dirent(checkedlen+1);
  935. if (!fd) {
  936. return -ENOMEM;
  937. }
  938. memcpy(&fd->name, rd->name, checkedlen);
  939. fd->name[checkedlen] = 0;
  940. crc = crc32(0, fd->name, rd->nsize);
  941. if (crc != je32_to_cpu(rd->name_crc)) {
  942. pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
  943. __func__, ofs, je32_to_cpu(rd->name_crc), crc);
  944. jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n",
  945. fd->name, je32_to_cpu(rd->ino));
  946. jffs2_free_full_dirent(fd);
  947. /* FIXME: Why do we believe totlen? */
  948. /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
  949. if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
  950. return err;
  951. return 0;
  952. }
  953. ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
  954. if (!ic) {
  955. jffs2_free_full_dirent(fd);
  956. return -ENOMEM;
  957. }
  958. fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd),
  959. PAD(je32_to_cpu(rd->totlen)), ic);
  960. fd->next = NULL;
  961. fd->version = je32_to_cpu(rd->version);
  962. fd->ino = je32_to_cpu(rd->ino);
  963. fd->nhash = full_name_hash(fd->name, checkedlen);
  964. fd->type = rd->type;
  965. jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
  966. if (jffs2_sum_active()) {
  967. jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
  968. }
  969. return 0;
  970. }
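/*
 * The helpers below rotate each block list by a pseudo-random amount, seeded
 * from the node versions seen during the scan, so that successive mounts are
 * less likely to keep picking the same blocks for writes and GC.
 */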
  971. static int count_list(struct list_head *l)
  972. {
  973. uint32_t count = 0;
  974. struct list_head *tmp;
  975. list_for_each(tmp, l) {
  976. count++;
  977. }
  978. return count;
  979. }
  980. /* Note: This breaks if list_empty(head). I don't care. You
  981. might, if you copy this code and use it elsewhere :) */
  982. static void rotate_list(struct list_head *head, uint32_t count)
  983. {
  984. struct list_head *n = head->next;
  985. list_del(head);
  986. while(count--) {
  987. n = n->next;
  988. }
  989. list_add(head, n);
  990. }
  991. void jffs2_rotate_lists(struct jffs2_sb_info *c)
  992. {
  993. uint32_t x;
  994. uint32_t rotateby;
  995. x = count_list(&c->clean_list);
  996. if (x) {
  997. rotateby = pseudo_random % x;
  998. rotate_list((&c->clean_list), rotateby);
  999. }
  1000. x = count_list(&c->very_dirty_list);
  1001. if (x) {
  1002. rotateby = pseudo_random % x;
  1003. rotate_list((&c->very_dirty_list), rotateby);
  1004. }
  1005. x = count_list(&c->dirty_list);
  1006. if (x) {
  1007. rotateby = pseudo_random % x;
  1008. rotate_list((&c->dirty_list), rotateby);
  1009. }
  1010. x = count_list(&c->erasable_list);
  1011. if (x) {
  1012. rotateby = pseudo_random % x;
  1013. rotate_list((&c->erasable_list), rotateby);
  1014. }
  1015. if (c->nr_erasing_blocks) {
  1016. rotateby = pseudo_random % c->nr_erasing_blocks;
  1017. rotate_list((&c->erase_pending_list), rotateby);
  1018. }
  1019. if (c->nr_free_blocks) {
  1020. rotateby = pseudo_random % c->nr_free_blocks;
  1021. rotate_list((&c->free_list), rotateby);
  1022. }
  1023. }