scan.c

/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: scan.c,v 1.125 2005/09/30 13:59:13 dedekind Exp $
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include "nodelist.h"
#include "summary.h"
#include "debug.h"

#define DEFAULT_EMPTY_SCAN_SIZE 1024

#define noisy_printk(noise, args...) do { \
	if (*(noise)) { \
		printk(KERN_NOTICE args); \
		(*(noise))--; \
		if (!(*(noise))) { \
			printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
		} \
	} \
} while(0)

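/* Accumulated from node version numbers during the scan; used only to pick
   the rotation points in jffs2_rotate_lists(). */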
static uint32_t pseudo_random;

static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s);

/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
 * Returning an error will abort the mount - bad checksums etc. should just mark the space
 * as dirty.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);

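/* Minimum amount of free space worth writing into: room for a couple of raw
   inode nodes, or a whole write-buffer page when nodes cannot be marked
   obsolete in place. */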
static inline int min_free(struct jffs2_sb_info *c)
{
	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
		return c->wbuf_pagesize;
#endif
	return min;
}

static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
	if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
		return sector_size;
	else
		return DEFAULT_EMPTY_SCAN_SIZE;
}

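/* Give up on a prospective nextblock: account all of its remaining free space
   as dirty, fold any wasted space into dirty too, and put it on the dirty or
   very_dirty list. */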
static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	int ret;

	if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
		return ret;
	if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
		return ret;
	/* Turned wasted size into dirty, since we apparently
	   think it's recoverable now. */
	jeb->dirty_size += jeb->wasted_size;
	c->dirty_size += jeb->wasted_size;
	c->wasted_size -= jeb->wasted_size;
	jeb->wasted_size = 0;
	if (VERYDIRTY(c, jeb->dirty_size)) {
		list_add(&jeb->list, &c->very_dirty_list);
	} else {
		list_add(&jeb->list, &c->dirty_list);
	}
	return 0;
}

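/* Scan the whole medium at mount time, classify each eraseblock and put it on
   the appropriate list (free, clean, dirty, very_dirty, erase_pending or bad). */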
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
	struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
#ifndef __ECOS
	size_t pointlen;

	if (c->mtd->point) {
		ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
		if (!ret && pointlen < c->mtd->size) {
			/* Don't muck about if it won't let us point to the whole flash */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
			c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
			flashbuf = NULL;
		}
		if (ret)
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
	}
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			buf_size = c->sector_size;
		else
			buf_size = PAGE_SIZE;

		/* Respect kmalloc limitations */
		if (buf_size > 128*1024)
			buf_size = 128*1024;

		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
		flashbuf = kmalloc(buf_size, GFP_KERNEL);
		if (!flashbuf)
			return -ENOMEM;
	}

	if (jffs2_sum_active()) {
		s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
		if (!s) {
			kfree(flashbuf);
			JFFS2_WARNING("Can't allocate memory for summary\n");
			return -ENOMEM;
		}
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		cond_resched();

		/* reset summary info for next eraseblock scan */
		jffs2_sum_reset_collected(s);

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
						buf_size, s);

		if (ret < 0)
			goto out;

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block. Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again. It will be marked as such when the erase
			 * is complete. Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with most free space
			   and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					ret = file_dirty(c, c->nextblock);
					if (ret)
						return ret;
					/* deleting summary information of the old nextblock */
					jffs2_sum_reset_collected(c->summary);
				}
				/* update collected summary information for the current nextblock */
				jffs2_sum_move_collected(c, s);
				D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
				c->nextblock = jeb;
			} else {
				ret = file_dirty(c, jeb);
				if (ret)
					return ret;
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;

		default:
			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;

		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  skip));
		jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
		jffs2_scan_dirty_space(c, c->nextblock, skip);
	}
#endif
	if (c->nr_erasing_blocks) {
		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		jffs2_erase_pending_trigger(c);
	}
	ret = 0;
 out:
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
	if (s)
		kfree(s);
	return ret;
}

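/* Read 'len' bytes at flash offset 'ofs' into 'buf'; a short read is treated
   as an I/O error. */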
static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
			       uint32_t ofs, uint32_t len)
{
	int ret;
	size_t retlen;

	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
	if (ret) {
		D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
		return ret;
	}
	if (retlen < len) {
		D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
		return -EIO;
	}
	return 0;
}

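/* Classify a freshly scanned block from its space accounting: cleanmarker
   only, (almost) clean, partly dirty, or all dirty. */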
int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
	    && (!jeb->first_node || !ref_next(jeb->first_node)) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with max 4 byte dirty space to cleanlist */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}

#ifdef CONFIG_JFFS2_FS_XATTR
static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_xattr *rx, uint32_t ofs,
				 struct jffs2_summary *s)
{
	struct jffs2_xattr_datum *xd;
	uint32_t xid, version, totlen, crc;
	int err;

	crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
	if (crc != je32_to_cpu(rx->node_crc)) {
		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
			      ofs, je32_to_cpu(rx->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	xid = je32_to_cpu(rx->xid);
	version = je32_to_cpu(rx->version);

	totlen = PAD(sizeof(struct jffs2_raw_xattr)
			+ rx->name_len + 1 + je16_to_cpu(rx->value_len));
	if (totlen != je32_to_cpu(rx->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
			      ofs, je32_to_cpu(rx->totlen), totlen);
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
			return err;
		return 0;
	}

	xd = jffs2_setup_xattr_datum(c, xid, version);
	if (IS_ERR(xd))
		return PTR_ERR(xd);

	if (xd->version > version) {
		struct jffs2_raw_node_ref *raw
			= jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
		raw->next_in_ino = xd->node->next_in_ino;
		xd->node->next_in_ino = raw;
	} else {
		xd->version = version;
		xd->xprefix = rx->xprefix;
		xd->name_len = rx->name_len;
		xd->value_len = je16_to_cpu(rx->value_len);
		xd->data_crc = je32_to_cpu(rx->data_crc);

		jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd);
	}

	if (jffs2_sum_active())
		jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
	dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
		  ofs, xd->xid, xd->version);
	return 0;
}

static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				struct jffs2_raw_xref *rr, uint32_t ofs,
				struct jffs2_summary *s)
{
	struct jffs2_xattr_ref *ref;
	uint32_t crc;
	int err;

	crc = crc32(0, rr, sizeof(*rr) - 4);
	if (crc != je32_to_cpu(rr->node_crc)) {
		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
			      ofs, je32_to_cpu(rr->node_crc), crc);
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
			return err;
		return 0;
	}

	if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n",
			      ofs, je32_to_cpu(rr->totlen),
			      PAD(sizeof(struct jffs2_raw_xref)));
		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
			return err;
		return 0;
	}

	ref = jffs2_alloc_xattr_ref();
	if (!ref)
		return -ENOMEM;

	/* Before jffs2_build_xattr_subsystem() is called, and after an xattr_ref
	 * has been marked as a dead xref, ref->xid holds the 32-bit xid (xd is
	 * not used) and ref->ino holds the 32-bit inode number (ic is not used).
	 * These fields are members of a union, so their uses are mutually
	 * exclusive. In a similar way, ref->next is temporarily used to chain
	 * all xattr_ref objects; it is re-chained onto the jffs2_inode_cache in
	 * jffs2_build_xattr_subsystem().
	 */
	ref->ino = je32_to_cpu(rr->ino);
	ref->xid = je32_to_cpu(rr->xid);
	ref->xseqno = je32_to_cpu(rr->xseqno);
	if (ref->xseqno > c->highest_xseqno)
		c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
	ref->next = c->xref_temp;
	c->xref_temp = ref;

	jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);

	if (jffs2_sum_active())
		jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
	dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n",
		  ofs, ref->xid, ref->ino);
	return 0;
}
#endif

/* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
   the flash, XIP-style */
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (jffs2_cleanmarker_oob(c)) {
		int ret = jffs2_check_nand_cleanmarker(c, jeb);
		D2(printk(KERN_NOTICE "jffs2_check_nand_cleanmarker returned %d\n",ret));
		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1:		break;
		case 2:		return BLK_STATE_BADBLOCK;
		case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
		default:	return ret;
		}
	}
#endif

	if (jffs2_sum_active()) {
		struct jffs2_sum_marker *sm;
		void *sumptr = NULL;
		uint32_t sumlen;

		if (!buf_size) {
			/* XIP case. Just look, point at the summary if it's there */
			sm = (void *)buf + c->sector_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumptr = buf + je32_to_cpu(sm->offset);
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
			}
		} else {
			/* If NAND flash, read a whole page of it. Else just the end */
			if (c->wbuf_pagesize)
				buf_len = c->wbuf_pagesize;
			else
				buf_len = sizeof(*sm);

			/* Read as much as we want into the _end_ of the preallocated buffer */
			err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
						  jeb->offset + c->sector_size - buf_len,
						  buf_len);
			if (err)
				return err;

			sm = (void *)buf + buf_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
				sumptr = buf + buf_size - sumlen;

				/* Now, make sure the summary itself is available */
				if (sumlen > buf_size) {
					/* Need to kmalloc for this. */
					sumptr = kmalloc(sumlen, GFP_KERNEL);
					if (!sumptr)
						return -ENOMEM;
					memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
				}
				if (buf_len < sumlen) {
					/* Need to read more so that the entire summary node is present */
					err = jffs2_fill_scan_buf(c, sumptr,
								  jeb->offset + c->sector_size - sumlen,
								  sumlen - buf_len);
					if (err)
						return err;
				}
			}
		}

		if (sumptr) {
			err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);

			if (buf_size && sumlen > buf_size)
				kfree(sumptr);
			/* If it returns with a real error, bail.
			   If it returns positive, that's a block classification
			   (i.e. BLK_STATE_xxx) so return that too.
			   If it returns zero, fall through to full scan. */
			if (err)
				return err;
		}
	}

	buf_ofs = jeb->offset;

	if (!buf_size) {
		/* This is the XIP case -- we're reading _directly_ from the flash chip */
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE(c->sector_size);
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;

	/* Scan only the first EMPTY_SCAN_SIZE bytes of 0xFF before declaring it's empty */
	while(ofs < EMPTY_SCAN_SIZE(c->sector_size) && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == EMPTY_SCAN_SIZE(c->sector_size)) {
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1:		return BLK_STATE_ALLDIRTY;
			default:	return ret;
			}
		}
#endif
		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
		if (c->cleanmarker_size == 0)
			return BLK_STATE_CLEANMARKER;	/* don't bother with re-erase */
		else
			return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs));
		if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
			return err;
		if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
			return err;
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

	dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);

scan_more:
	while(ofs < jeb->offset + c->sector_size) {

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Make sure there are node refs available for use */
		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
		if (err)
			return err;

		cond_resched();

		if (ofs & 3) {
			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
			if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
				return err;
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs;
			uint32_t empty_start;

			empty_start = ofs;
			ofs += 4;

			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < buf_len) {
				if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
					       empty_start, ofs);
					if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
						return err;
					goto scan_more;
				}

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
				D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
				return BLK_STATE_CLEANMARKER;
			}

			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start));
				break;
			}
			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x} has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) >
		    jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
			       ofs, je32_to_cpu(node->totlen));
			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)"
					  " left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
		case JFFS2_NODETYPE_XREF:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)"
					  " left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
#endif	/* CONFIG_JFFS2_FS_XATTR */

		case JFFS2_NODETYPE_CLEANMARKER:
			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);
				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			if (jffs2_sum_active())
				jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY: {
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));

				jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);

				/* We can't summarise nodes we don't grok */
				jffs2_sum_disable_collecting(s);
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
				}
			}
		}
	}

	if (jffs2_sum_active()) {
		if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
			dbg_summary("There is not enough space for "
				"summary information, disabling for this jeb!\n");
			jffs2_sum_disable_collecting(s);
		}
	}

	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
		  jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	return jffs2_scan_classify_jeb(c, jeb);
}

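/* Look up the inode cache entry for 'ino', creating a fresh one (and tracking
   the highest inode number seen so far) if it doesn't exist yet. */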
struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inode_cache *ic;

	ic = jffs2_get_ino_cache(c, ino);
	if (ic)
		return ic;

	if (ino > c->highest_ino)
		c->highest_ino = ino;

	ic = jffs2_alloc_inode_cache();
	if (!ic) {
		printk(KERN_NOTICE "jffs2_scan_make_ino_cache(): allocation of inode cache failed\n");
		return NULL;
	}
	memset(ic, 0, sizeof(*ic));

	ic->ino = ino;
	ic->nodes = (void *)ic;
	jffs2_add_ino_cache(c, ic);
	if (ino == 1)
		ic->nlink = 1;
	return ic;
}

static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
{
	struct jffs2_inode_cache *ic;
	uint32_t ino = je32_to_cpu(ri->ino);
	int err;

	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));

	/* We do very little here now. Just check the ino# to which we should attribute
	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
	   we used to scan the flash once only, reading everything we want from it into
	   memory, then building all our in-core data structures and freeing the extra
	   information. Now we allow the first part of the mount to complete a lot quicker,
	   but we have to go _back_ to the flash in order to finish the CRC checking, etc.
	   Which means that the _full_ amount of time to get to proper write mode with GC
	   operational may actually be _longer_ than before. Sucks to be me. */

	ic = jffs2_get_ino_cache(c, ino);
	if (!ic) {
		/* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
		   first node we found for this inode. Do a CRC check to protect against the former
		   case */
		uint32_t crc = crc32(0, ri, sizeof(*ri)-8);

		if (crc != je32_to_cpu(ri->node_crc)) {
			printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ofs, je32_to_cpu(ri->node_crc), crc);
			/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(ri->totlen)))))
				return err;
			return 0;
		}
		ic = jffs2_scan_make_ino_cache(c, ino);
		if (!ic)
			return -ENOMEM;
	}

	/* Wheee. It worked */
	jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);

	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
		  je32_to_cpu(ri->offset),
		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));

	pseudo_random += je32_to_cpu(ri->version);

	if (jffs2_sum_active()) {
		jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
	}

	return 0;
}

static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
{
	struct jffs2_full_dirent *fd;
	struct jffs2_inode_cache *ic;
	uint32_t crc;
	int err;

	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));

	/* We don't get here unless the node is still valid, so we don't have to
	   mask in the ACCURATE bit any more. */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->node_crc), crc);
		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
			return err;
		return 0;
	}

	pseudo_random += je32_to_cpu(rd->version);

	fd = jffs2_alloc_full_dirent(rd->nsize+1);
	if (!fd) {
		return -ENOMEM;
	}
	memcpy(&fd->name, rd->name, rd->nsize);
	fd->name[rd->nsize] = 0;

	crc = crc32(0, fd->name, rd->nsize);
	if (crc != je32_to_cpu(rd->name_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->name_crc), crc);
		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
		jffs2_free_full_dirent(fd);
		/* FIXME: Why do we believe totlen? */
		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
			return err;
		return 0;
	}
	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
	if (!ic) {
		jffs2_free_full_dirent(fd);
		return -ENOMEM;
	}

	fd->raw = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rd->totlen)), ic);

	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->type = rd->type;
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	if (jffs2_sum_active()) {
		jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
	}

	return 0;
}

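/* Count the entries on a list so jffs2_rotate_lists() can pick a rotation
   point within it. */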
static int count_list(struct list_head *l)
{
	uint32_t count = 0;
	struct list_head *tmp;

	list_for_each(tmp, l) {
		count++;
	}
	return count;
}

/* Note: This breaks if list_empty(head). I don't care. You
   might, if you copy this code and use it elsewhere :) */
static void rotate_list(struct list_head *head, uint32_t count)
{
	struct list_head *n = head->next;

	list_del(head);
	while(count--) {
		n = n->next;
	}
	list_add(head, n);
}

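/* Rotate each of the block lists by a pseudo-random amount, derived from the
   node versions seen during the scan, so that successive mounts don't always
   start garbage collection and new writes from the same blocks. */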
void jffs2_rotate_lists(struct jffs2_sb_info *c)
{
	uint32_t x;
	uint32_t rotateby;

	x = count_list(&c->clean_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->clean_list), rotateby);
	}

	x = count_list(&c->very_dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->very_dirty_list), rotateby);
	}

	x = count_list(&c->dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->dirty_list), rotateby);
	}

	x = count_list(&c->erasable_list);
	if (x) {
		rotateby = pseudo_random % x;
		rotate_list((&c->erasable_list), rotateby);
	}

	if (c->nr_erasing_blocks) {
		rotateby = pseudo_random % c->nr_erasing_blocks;
		rotate_list((&c->erase_pending_list), rotateby);
	}

	if (c->nr_free_blocks) {
		rotateby = pseudo_random % c->nr_free_blocks;
		rotate_list((&c->free_list), rotateby);
	}
}