  1. /*
  2. * JFFS2 -- Journalling Flash File System, Version 2.
  3. *
  4. * Copyright (C) 2001-2003 Red Hat, Inc.
  5. *
  6. * Created by David Woodhouse <dwmw2@infradead.org>
  7. *
  8. * For licensing information, see the file 'LICENCE' in this directory.
  9. *
  10. * $Id: nodelist.c,v 1.115 2005/11/07 11:14:40 gleixner Exp $
  11. *
  12. */
  13. #include <linux/kernel.h>
  14. #include <linux/sched.h>
  15. #include <linux/fs.h>
  16. #include <linux/mtd/mtd.h>
  17. #include <linux/rbtree.h>
  18. #include <linux/crc32.h>
  19. #include <linux/slab.h>
  20. #include <linux/pagemap.h>
  21. #include "nodelist.h"
  22. void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
  23. {
  24. struct jffs2_full_dirent **prev = list;
  25. dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino);
  26. while ((*prev) && (*prev)->nhash <= new->nhash) {
  27. if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
  28. /* Duplicate. Free one */
  29. if (new->version < (*prev)->version) {
  30. dbg_dentlist("Eep! Marking new dirent node is obsolete, old is \"%s\", ino #%u\n",
  31. (*prev)->name, (*prev)->ino);
  32. jffs2_mark_node_obsolete(c, new->raw);
  33. jffs2_free_full_dirent(new);
  34. } else {
  35. dbg_dentlist("marking old dirent \"%s\", ino #%u bsolete\n",
  36. (*prev)->name, (*prev)->ino);
  37. new->next = (*prev)->next;
  38. jffs2_mark_node_obsolete(c, ((*prev)->raw));
  39. jffs2_free_full_dirent(*prev);
  40. *prev = new;
  41. }
  42. return;
  43. }
  44. prev = &((*prev)->next);
  45. }
  46. new->next = *prev;
  47. *prev = new;
  48. }
/*
 * Truncate an inode's fragment tree to 'size' bytes: trim any fragment that
 * straddles the new size, and remove (obsolete) every fragment lying wholly
 * at or beyond it.  Does not update inode->i_size.
 */
void jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size)
{
	struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size);

	dbg_fragtree("truncating fragtree to 0x%08x bytes\n", size);

	/* We know frag->ofs <= size. That's what lookup does for us */
	if (frag && frag->ofs != size) {
		if (frag->ofs+frag->size > size) {
			/* Straddling fragment: cut it down to end exactly at 'size' */
			frag->size = size - frag->ofs;
		}
		frag = frag_next(frag);
	}
	/* Erase and obsolete every fragment entirely past the new size */
	while (frag && frag->ofs >= size) {
		struct jffs2_node_frag *next = frag_next(frag);

		frag_erase(frag, list);
		jffs2_obsolete_node_frag(c, frag);
		frag = next;
	}

	/* Empty file: nothing left to re-flag */
	if (size == 0)
		return;

	/*
	 * If the last fragment starts at the RAM page boundary, it is
	 * REF_PRISTINE irrespective of its size.
	 */
	frag = frag_last(list);
	if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
		dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
			frag->ofs, frag->ofs + frag->size);
		frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
	}
}
  79. void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this)
  80. {
  81. if (this->node) {
  82. this->node->frags--;
  83. if (!this->node->frags) {
  84. /* The node has no valid frags left. It's totally obsoleted */
  85. dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n",
  86. ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size);
  87. jffs2_mark_node_obsolete(c, this->node->raw);
  88. jffs2_free_full_dnode(this->node);
  89. } else {
  90. dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n",
  91. ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags);
  92. mark_ref_normal(this->node->raw);
  93. }
  94. }
  95. jffs2_free_node_frag(this);
  96. }
/*
 * Link 'newfrag' into the rb-tree in the subtree below 'base', descending by
 * fragment offset.  Only links the node; the caller is responsible for the
 * follow-up rb_insert_color() rebalance.
 */
static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
{
	struct rb_node *parent = &base->rb;
	/* Point 'link' at 'parent' itself so the first loop iteration starts
	   the descent at 'base'; thereafter it tracks the child slot taken. */
	struct rb_node **link = &parent;

	dbg_fragtree2("insert frag (0x%04x-0x%04x)\n", newfrag->ofs, newfrag->ofs + newfrag->size);

	while (*link) {
		parent = *link;
		base = rb_entry(parent, struct jffs2_node_frag, rb);

		if (newfrag->ofs > base->ofs)
			link = &base->rb.rb_right;
		else if (newfrag->ofs < base->ofs)
			link = &base->rb.rb_left;
		else {
			/* Two fragments must never share a starting offset */
			JFFS2_ERROR("duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
			BUG();
		}
	}

	rb_link_node(&newfrag->rb, &base->rb, link);
}
  116. /*
  117. * Allocate and initializes a new fragment.
  118. */
  119. static struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size)
  120. {
  121. struct jffs2_node_frag *newfrag;
  122. newfrag = jffs2_alloc_node_frag();
  123. if (likely(newfrag)) {
  124. newfrag->ofs = ofs;
  125. newfrag->size = size;
  126. newfrag->node = fn;
  127. } else {
  128. JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n");
  129. }
  130. return newfrag;
  131. }
/*
 * Called when no overlapping fragment exists.  If there is a gap between the
 * end of the tree ('lastend') and the new fragment, inserts a hole fragment
 * to cover it, then inserts the new fragment into the fragtree.
 * On failure the caller's 'newfrag' is freed here.
 */
static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root,
		 	       struct jffs2_node_frag *newfrag,
			       struct jffs2_node_frag *this, uint32_t lastend)
{
	if (lastend < newfrag->node->ofs) {
		/* put a hole in before the new fragment */
		struct jffs2_node_frag *holefrag;

		holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend);
		if (unlikely(!holefrag)) {
			jffs2_free_node_frag(newfrag);
			return -ENOMEM;
		}

		if (this) {
			/* By definition, the 'this' node has no right-hand child,
			   because there are no frags with offset greater than it.
			   So that's where we want to put the hole */
			dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n",
				holefrag->ofs, holefrag->ofs + holefrag->size);
			rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right);
		} else {
			dbg_fragtree2("Add hole frag %#04x-%#04x to the root of the tree.\n",
				holefrag->ofs, holefrag->ofs + holefrag->size);
			rb_link_node(&holefrag->rb, NULL, &root->rb_node);
		}
		rb_insert_color(&holefrag->rb, root);

		/* The new fragment now hangs to the right of the hole */
		this = holefrag;
	}

	if (this) {
		/* By definition, the 'this' node has no right-hand child,
		   because there are no frags with offset greater than it.
		   So that's where we want to put new fragment */
		dbg_fragtree2("add the new node at the right\n");
		rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);
	} else {
		dbg_fragtree2("insert the new node at the root of the tree\n");
		rb_link_node(&newfrag->rb, NULL, &root->rb_node);
	}
	rb_insert_color(&newfrag->rb, root);

	return 0;
}
/*
 * Add 'newfrag' to the fragment tree, resolving any overlap with existing
 * fragments: older overlapped fragments are trimmed, split in two, or
 * obsoleted entirely.  Doesn't set inode->i_size.
 */
static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag)
{
	struct jffs2_node_frag *this;
	uint32_t lastend;

	/* Skip all the nodes which are completed before this one starts */
	this = jffs2_lookup_node_frag(root, newfrag->node->ofs);

	if (this) {
		dbg_fragtree2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n",
			  this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this);
		lastend = this->ofs + this->size;
	} else {
		dbg_fragtree2("lookup gave no frag\n");
		lastend = 0;
	}

	/* See if we ran off the end of the fragtree */
	if (lastend <= newfrag->ofs) {
		/* We did */

		/* Check if 'this' node was on the same page as the new node.
		   If so, both 'this' and the new node get marked REF_NORMAL so
		   the GC can take a look.
		*/
		if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
			if (this->node)
				mark_ref_normal(this->node->raw);
			mark_ref_normal(newfrag->node->raw);
		}

		return no_overlapping_node(c, root, newfrag, this, lastend);
	}

	if (this->node)
		dbg_fragtree2("dealing with frag %u-%u, phys %#08x(%d).\n",
		this->ofs, this->ofs + this->size,
		ref_offset(this->node->raw), ref_flags(this->node->raw));
	else
		dbg_fragtree2("dealing with hole frag %u-%u.\n",
		this->ofs, this->ofs + this->size);

	/* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes,
	 * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs
	 */
	if (newfrag->ofs > this->ofs) {
		/* This node isn't completely obsoleted. The start of it remains valid */

		/* Mark the new node and the partially covered node REF_NORMAL -- let
		   the GC take a look at them */
		mark_ref_normal(newfrag->node->raw);
		if (this->node)
			mark_ref_normal(this->node->raw);

		if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
			/* The new node splits 'this' frag into two */
			struct jffs2_node_frag *newfrag2;

			if (this->node)
				dbg_fragtree2("split old frag 0x%04x-0x%04x, phys 0x%08x\n",
					this->ofs, this->ofs+this->size, ref_offset(this->node->raw));
			else
				dbg_fragtree2("split old hole frag 0x%04x-0x%04x\n",
					this->ofs, this->ofs+this->size);

			/* New second frag pointing to this's node */
			newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
						this->ofs + this->size - newfrag->ofs - newfrag->size);
			if (unlikely(!newfrag2))
				return -ENOMEM;
			/* The tail keeps a reference to the same backing node */
			if (this->node)
				this->node->frags++;

			/* Adjust size of original 'this' */
			this->size = newfrag->ofs - this->ofs;

			/* Now, we know there's no node with offset
			   greater than this->ofs but smaller than
			   newfrag2->ofs or newfrag->ofs, for obvious
			   reasons. So we can do a tree insert from
			   'this' to insert newfrag, and a tree insert
			   from newfrag to insert newfrag2. */
			jffs2_fragtree_insert(newfrag, this);
			rb_insert_color(&newfrag->rb, root);

			jffs2_fragtree_insert(newfrag2, newfrag);
			rb_insert_color(&newfrag2->rb, root);

			return 0;
		}
		/* New node just reduces 'this' frag in size, doesn't split it */
		this->size = newfrag->ofs - this->ofs;

		/* Again, we know it lives down here in the tree */
		jffs2_fragtree_insert(newfrag, this);
		rb_insert_color(&newfrag->rb, root);
	} else {
		/* New frag starts at the same point as 'this' used to. Replace
		   it in the tree without doing a delete and insertion */
		dbg_fragtree2("inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n",
			  newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size);

		rb_replace_node(&this->rb, &newfrag->rb, root);

		if (newfrag->ofs + newfrag->size >= this->ofs+this->size) {
			dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size);
			jffs2_obsolete_node_frag(c, this);
		} else {
			/* 'this' still has a valid tail; shrink it and re-insert
			   below newfrag */
			this->ofs += newfrag->size;
			this->size -= newfrag->size;

			jffs2_fragtree_insert(this, newfrag);
			rb_insert_color(&this->rb, root);
			return 0;
		}
	}
	/* OK, now we have newfrag added in the correct place in the tree, but
	   frag_next(newfrag) may be a fragment which is overlapped by it
	*/
	while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
		/* 'this' frag is obsoleted completely. */
		dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n",
			this, this->ofs, this->ofs+this->size);
		rb_erase(&this->rb, root);
		jffs2_obsolete_node_frag(c, this);
	}
	/* Now we're pointing at the first frag which isn't totally obsoleted by
	   the new frag */

	if (!this || newfrag->ofs + newfrag->size == this->ofs)
		return 0;

	/* Still some overlap but we don't need to move it in the tree */
	this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
	this->ofs = newfrag->ofs + newfrag->size;

	/* And mark them REF_NORMAL so the GC takes a look at them */
	if (this->node)
		mark_ref_normal(this->node->raw);
	mark_ref_normal(newfrag->node->raw);

	return 0;
}
/*
 * Given an inode, probably with existing tree of fragments, add the new node
 * to the fragment tree.
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * jffs2_add_frag_to_fragtree().
 */
int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
{
	int ret;
	struct jffs2_node_frag *newfrag;

	/* Zero-sized nodes contribute nothing to the fragtree */
	if (unlikely(!fn->size))
		return 0;

	newfrag = new_fragment(fn, fn->ofs, fn->size);
	if (unlikely(!newfrag))
		return -ENOMEM;
	newfrag->node->frags = 1;

	dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n",
		  fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag);

	ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag);
	if (unlikely(ret))
		/* NOTE(review): newfrag is not freed here; only some failure
		   paths inside jffs2_add_frag_to_fragtree() free it -- verify
		   ownership on the -ENOMEM split path. */
		return ret;

	/* If we now share a page with other nodes, mark either previous
	   or next node REF_NORMAL, as appropriate.  */
	if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
		struct jffs2_node_frag *prev = frag_prev(newfrag);

		mark_ref_normal(fn->raw);
		/* If we don't start at zero there's _always_ a previous */
		if (prev->node)
			mark_ref_normal(prev->node->raw);
	}

	if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
		struct jffs2_node_frag *next = frag_next(newfrag);

		if (next) {
			mark_ref_normal(fn->raw);
			if (next->node)
				mark_ref_normal(next->node->raw);
		}
	}
	jffs2_dbg_fragtree_paranoia_check_nolock(f);

	return 0;
}
/*
 * Check the data CRC of the node.
 *
 * Returns: 0 if the data CRC is correct;
 *	    1 - if incorrect;
 *	    error code if an error occurred.
 *
 * On success (0) the node's length is moved from 'unchecked' to 'used'
 * space accounting under the erase_completion_lock.
 */
static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	struct jffs2_raw_node_ref *ref = tn->fn->raw;
	int err = 0, pointed = 0;
	struct jffs2_eraseblock *jeb;
	unsigned char *buffer;
	uint32_t crc, ofs, len;
	size_t retlen;

	BUG_ON(tn->csize == 0);

	/* NOR flash with no write-buffer: data was fully CRC'd at read time */
	if (!jffs2_is_writebuffered(c))
		goto adj_acc;

	/* Calculate how many bytes were already checked */
	ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
	len = ofs % c->wbuf_pagesize;
	if (likely(len))
		len = c->wbuf_pagesize - len;

	if (len >= tn->csize) {
		dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
			ref_offset(ref), tn->csize, ofs);
		goto adj_acc;
	}

	/* Remaining unchecked region: [ofs, ofs + len) */
	ofs += len;
	len = tn->csize - len;

	dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
		ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);

#ifndef __ECOS
	/* TODO: instead, encapsulate point() stuff to jffs2_flash_read(),
	 * adding and jffs2_flash_read_end() interface. */
	if (c->mtd->point) {
		err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
		/* NOTE(review): the request was for 'len' bytes but retlen is
		   compared against tn->csize -- looks inconsistent; verify. */
		if (!err && retlen < tn->csize) {
			JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
			c->mtd->unpoint(c->mtd, buffer, ofs, len);
		} else if (err)
			JFFS2_WARNING("MTD point failed: error code %d.\n", err);
		else
			pointed = 1; /* successfully pointed to device */
	}
#endif

	if (!pointed) {
		/* Fall back to reading the data into a temporary buffer */
		buffer = kmalloc(len, GFP_KERNEL);
		if (unlikely(!buffer))
			return -ENOMEM;

		/* TODO: this is very frequent pattern, make it a separate
		 * routine */
		err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
			goto free_out;
		}

		if (retlen != len) {
			JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
			err = -EIO;
			goto free_out;
		}
	}

	/* Continue calculating CRC */
	crc = crc32(tn->partial_crc, buffer, len);
	if(!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, buffer, ofs, len);
#endif

	if (crc != tn->data_crc) {
		JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
			ofs, tn->data_crc, crc);
		return 1;
	}

adj_acc:
	jeb = &c->blocks[ref->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, ref);

	/*
	 * Mark the node as having been checked and fix the
	 * accounting accordingly.
	 */
	spin_lock(&c->erase_completion_lock);
	jeb->used_size += len;
	jeb->unchecked_size -= len;
	c->used_size += len;
	c->unchecked_size -= len;
	spin_unlock(&c->erase_completion_lock);

	return 0;

free_out:
	if(!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, buffer, ofs, len);
#endif
	return err;
}
  435. /*
  436. * Helper function for jffs2_add_older_frag_to_fragtree().
  437. *
  438. * Checks the node if we are in the checking stage.
  439. */
  440. static int check_node(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn)
  441. {
  442. int ret;
  443. BUG_ON(ref_obsolete(tn->fn->raw));
  444. /* We only check the data CRC of unchecked nodes */
  445. if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
  446. return 0;
  447. dbg_fragtree2("check node %#04x-%#04x, phys offs %#08x.\n",
  448. tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));
  449. ret = check_node_data(c, tn);
  450. if (unlikely(ret < 0)) {
  451. JFFS2_ERROR("check_node_data() returned error: %d.\n",
  452. ret);
  453. } else if (unlikely(ret > 0)) {
  454. dbg_fragtree2("CRC error, mark it obsolete.\n");
  455. jffs2_mark_node_obsolete(c, tn->fn->raw);
  456. }
  457. return ret;
  458. }
/*
 * Helper function for jffs2_add_older_frag_to_fragtree().
 *
 * Called when the new fragment that is being inserted
 * splits a hole fragment.  The new fragment replaces the overlapped part of
 * the hole; any uncovered hole space is kept (or re-created) on either side.
 */
static int split_hole(struct jffs2_sb_info *c, struct rb_root *root,
		      struct jffs2_node_frag *newfrag, struct jffs2_node_frag *hole)
{
	dbg_fragtree2("fragment %#04x-%#04x splits the hole %#04x-%#04x\n",
		newfrag->ofs, newfrag->ofs + newfrag->size, hole->ofs, hole->ofs + hole->size);

	if (hole->ofs == newfrag->ofs) {
		/*
		 * Well, the new fragment actually starts at the same offset as
		 * the hole.
		 */
		if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) {
			/*
			 * We replace the overlapped left part of the hole by
			 * the new node.
			 */
			dbg_fragtree2("insert fragment %#04x-%#04x and cut the left part of the hole\n",
				newfrag->ofs, newfrag->ofs + newfrag->size);
			rb_replace_node(&hole->rb, &newfrag->rb, root);

			hole->ofs += newfrag->size;
			hole->size -= newfrag->size;

			/*
			 * We know that 'hole' should be the right hand
			 * fragment.
			 */
			jffs2_fragtree_insert(hole, newfrag);
			rb_insert_color(&hole->rb, root);
		} else {
			/*
			 * Ah, the new fragment is of the same size as the hole.
			 * Replace the hole by it.
			 */
			dbg_fragtree2("insert fragment %#04x-%#04x and overwrite hole\n",
				newfrag->ofs, newfrag->ofs + newfrag->size);
			rb_replace_node(&hole->rb, &newfrag->rb, root);
			jffs2_free_node_frag(hole);
		}
	} else {
		/* The new fragment leaves some hole space at the left */
		struct jffs2_node_frag * newfrag2 = NULL;

		if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) {
			/* The new frag also leaves some space at the right */
			newfrag2 = new_fragment(NULL, newfrag->ofs +
				newfrag->size, hole->ofs + hole->size
				- newfrag->ofs - newfrag->size);
			if (unlikely(!newfrag2)) {
				jffs2_free_node_frag(newfrag);
				return -ENOMEM;
			}
		}

		/* Shrink the hole to just the uncovered left part */
		hole->size = newfrag->ofs - hole->ofs;
		dbg_fragtree2("left the hole %#04x-%#04x at the left and insert fragment %#04x-%#04x\n",
			hole->ofs, hole->ofs + hole->size, newfrag->ofs, newfrag->ofs + newfrag->size);

		jffs2_fragtree_insert(newfrag, hole);
		rb_insert_color(&newfrag->rb, root);

		if (newfrag2) {
			dbg_fragtree2("left the hole %#04x-%#04x at the right\n",
				newfrag2->ofs, newfrag2->ofs + newfrag2->size);
			jffs2_fragtree_insert(newfrag2, newfrag);
			rb_insert_color(&newfrag2->rb, root);
		}
	}

	return 0;
}
/*
 * This function is used when we build inode. It expects the nodes are passed
 * in the decreasing version order. The whole point of this is to improve the
 * inodes checking on NAND: we check the nodes' data CRC only when they are not
 * obsoleted. Previously, add_frag_to_fragtree() function was used and
 * nodes were passed to it in the increasing version order and CRCs of all
 * nodes were checked.
 *
 * Note: tn->fn->size shouldn't be zero.
 *
 * Returns 0 if the node was inserted
 *         1 if it wasn't inserted (since it is obsolete)
 *         < 0 an if error occurred
 */
int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				     struct jffs2_tmp_dnode_info *tn)
{
	struct jffs2_node_frag *this, *newfrag;
	uint32_t lastend;
	struct jffs2_full_dnode *fn = tn->fn;
	struct rb_root *root = &f->fragtree;
	/* Working copies: the still-uninserted remainder of the new node */
	uint32_t fn_size = fn->size, fn_ofs = fn->ofs;
	int err, checked = 0;
	int ref_flag;

	dbg_fragtree("insert fragment %#04x-%#04x, ver %u\n", fn_ofs, fn_ofs + fn_size, tn->version);

	/* Skip all the nodes which are completed before this one starts */
	this = jffs2_lookup_node_frag(root, fn_ofs);
	if (this)
		dbg_fragtree2("'this' found %#04x-%#04x (%s)\n", this->ofs, this->ofs + this->size, this->node ? "data" : "hole");

	if (this)
		lastend = this->ofs + this->size;
	else
		lastend = 0;

	/* Detect the preliminary type of node */
	if (fn->size >= PAGE_CACHE_SIZE)
		ref_flag = REF_PRISTINE;
	else
		ref_flag = REF_NORMAL;

	/* See if we ran off the end of the root */
	if (lastend <= fn_ofs) {
		/* We did */

		/*
		 * We are going to insert the new node into the
		 * fragment tree, so check it.
		 */
		err = check_node(c, f, tn);
		if (err != 0)
			return err;

		fn->frags = 1;

		newfrag = new_fragment(fn, fn_ofs, fn_size);
		if (unlikely(!newfrag))
			return -ENOMEM;

		err = no_overlapping_node(c, root, newfrag, this, lastend);
		if (unlikely(err != 0)) {
			jffs2_free_node_frag(newfrag);
			return err;
		}

		goto out_ok;
	}

	fn->frags = 0;

	/* Walk forward through every existing fragment the new node overlaps,
	   filling holes with pieces of the new node and skipping data frags
	   (which are newer and therefore win). */
	while (1) {
		/*
		 * Here we have:
		 * fn_ofs < this->ofs + this->size && fn_ofs >= this->ofs.
		 *
		 * Remember, 'this' has higher version, any non-hole node
		 * which is already in the fragtree is newer than the newly
		 * inserted.
		 */
		if (!this->node) {
			/*
			 * 'this' is the hole fragment, so at least the
			 * beginning of the new fragment is valid.
			 */

			/*
			 * We are going to insert the new node into the
			 * fragment tree, so check it.
			 */
			if (!checked) {
				err = check_node(c, f, tn);
				if (unlikely(err != 0))
					return err;
				checked = 1;
			}

			if (this->ofs + this->size >= fn_ofs + fn_size) {
				/* We split the hole on two parts */
				fn->frags += 1;
				newfrag = new_fragment(fn, fn_ofs, fn_size);
				if (unlikely(!newfrag))
					return -ENOMEM;

				err = split_hole(c, root, newfrag, this);
				if (unlikely(err))
					return err;
				goto out_ok;
			}

			/*
			 * The beginning of the new fragment is valid since it
			 * overlaps the hole node.
			 */
			ref_flag = REF_NORMAL;

			fn->frags += 1;
			newfrag = new_fragment(fn, fn_ofs,
					this->ofs + this->size - fn_ofs);
			if (unlikely(!newfrag))
				return -ENOMEM;

			if (fn_ofs == this->ofs) {
				/*
				 * The new node starts at the same offset as
				 * the hole and supersedes the hole.
				 */
				dbg_fragtree2("add the new fragment instead of hole %#04x-%#04x, refcnt %d\n",
					fn_ofs, fn_ofs + this->ofs + this->size - fn_ofs, fn->frags);

				rb_replace_node(&this->rb, &newfrag->rb, root);
				jffs2_free_node_frag(this);
			} else {
				/*
				 * The hole becomes shorter as its right part
				 * is superseded by the new fragment.
				 */
				dbg_fragtree2("reduce size of hole %#04x-%#04x to %#04x-%#04x\n",
					this->ofs, this->ofs + this->size, this->ofs, this->ofs + this->size - newfrag->size);

				dbg_fragtree2("add new fragment %#04x-%#04x, refcnt %d\n", fn_ofs,
					fn_ofs + this->ofs + this->size - fn_ofs, fn->frags);

				this->size -= newfrag->size;
				jffs2_fragtree_insert(newfrag, this);
				rb_insert_color(&newfrag->rb, root);
			}

			/* Advance past the piece we just inserted */
			fn_ofs += newfrag->size;
			fn_size -= newfrag->size;
			this = rb_entry(rb_next(&newfrag->rb),
					struct jffs2_node_frag, rb);

			dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n",
				this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)");
		}

		/*
		 * 'This' node is not the hole so it obsoletes the new fragment
		 * either fully or partially.
		 */
		if (this->ofs + this->size >= fn_ofs + fn_size) {
			/* The new node is obsolete, drop it */
			if (fn->frags == 0) {
				dbg_fragtree2("%#04x-%#04x is obsolete, mark it obsolete\n", fn_ofs, fn_ofs + fn_size);
				ref_flag = REF_OBSOLETE;
			}
			goto out_ok;
		} else {
			struct jffs2_node_frag *new_this;

			/* 'This' node obsoletes the beginning of the new node */
			dbg_fragtree2("the beginning %#04x-%#04x is obsolete\n", fn_ofs, this->ofs + this->size);

			ref_flag = REF_NORMAL;

			fn_size -= this->ofs + this->size - fn_ofs;
			fn_ofs = this->ofs + this->size;
			dbg_fragtree2("now considering %#04x-%#04x\n", fn_ofs, fn_ofs + fn_size);

			/* NOTE(review): the !new_this test below only works if
			   rb_entry(NULL, ...) yields NULL, i.e. 'rb' is the
			   first member of struct jffs2_node_frag -- verify. */
			new_this = rb_entry(rb_next(&this->rb), struct jffs2_node_frag, rb);
			if (!new_this) {
				/*
				 * There is no next fragment. Add the rest of
				 * the new node as the right-hand child.
				 */
				if (!checked) {
					err = check_node(c, f, tn);
					if (unlikely(err != 0))
						return err;
					checked = 1;
				}

				fn->frags += 1;
				newfrag = new_fragment(fn, fn_ofs, fn_size);
				if (unlikely(!newfrag))
					return -ENOMEM;

				dbg_fragtree2("there are no more fragments, insert %#04x-%#04x\n",
					newfrag->ofs, newfrag->ofs + newfrag->size);
				rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);
				rb_insert_color(&newfrag->rb, root);
				goto out_ok;
			} else {
				this = new_this;
				dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n",
					this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)");
			}
		}
	}

out_ok:
	BUG_ON(fn->size < PAGE_CACHE_SIZE && ref_flag == REF_PRISTINE);

	if (ref_flag == REF_OBSOLETE) {
		dbg_fragtree2("the node is obsolete now\n");
		/* jffs2_mark_node_obsolete() will adjust space accounting */
		jffs2_mark_node_obsolete(c, fn->raw);
		return 1;
	}

	dbg_fragtree2("the node is \"%s\" now\n", ref_flag == REF_NORMAL ? "REF_NORMAL" : "REF_PRISTINE");

	/* Space accounting was adjusted at check_node_data() */
	spin_lock(&c->erase_completion_lock);
	fn->raw->flash_offset = ref_offset(fn->raw) | ref_flag;
	spin_unlock(&c->erase_completion_lock);

	return 0;
}
/*
 * Update the state of an inode cache entry and wake up anybody sleeping
 * on c->inocache_wq for a state transition. The store and the wakeup are
 * both performed under inocache_lock so a waiter that checks the state
 * under the same lock cannot miss the transition.
 */
void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
{
	spin_lock(&c->inocache_lock);
	ic->state = state;
	wake_up(&c->inocache_wq);
	spin_unlock(&c->inocache_lock);
}
  731. /* During mount, this needs no locking. During normal operation, its
  732. callers want to do other stuff while still holding the inocache_lock.
  733. Rather than introducing special case get_ino_cache functions or
  734. callbacks, we just let the caller do the locking itself. */
  735. struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
  736. {
  737. struct jffs2_inode_cache *ret;
  738. ret = c->inocache_list[ino % INOCACHE_HASHSIZE];
  739. while (ret && ret->ino < ino) {
  740. ret = ret->next;
  741. }
  742. if (ret && ret->ino != ino)
  743. ret = NULL;
  744. return ret;
  745. }
  746. void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new)
  747. {
  748. struct jffs2_inode_cache **prev;
  749. spin_lock(&c->inocache_lock);
  750. if (!new->ino)
  751. new->ino = ++c->highest_ino;
  752. dbg_inocache("add %p (ino #%u)\n", new, new->ino);
  753. prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE];
  754. while ((*prev) && (*prev)->ino < new->ino) {
  755. prev = &(*prev)->next;
  756. }
  757. new->next = *prev;
  758. *prev = new;
  759. spin_unlock(&c->inocache_lock);
  760. }
  761. void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
  762. {
  763. struct jffs2_inode_cache **prev;
  764. #ifdef CONFIG_JFFS2_FS_XATTR
  765. BUG_ON(old->xref);
  766. #endif
  767. dbg_inocache("del %p (ino #%u)\n", old, old->ino);
  768. spin_lock(&c->inocache_lock);
  769. prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE];
  770. while ((*prev) && (*prev)->ino < old->ino) {
  771. prev = &(*prev)->next;
  772. }
  773. if ((*prev) == old) {
  774. *prev = old->next;
  775. }
  776. /* Free it now unless it's in READING or CLEARING state, which
  777. are the transitions upon read_inode() and clear_inode(). The
  778. rest of the time we know nobody else is looking at it, and
  779. if it's held by read_inode() or clear_inode() they'll free it
  780. for themselves. */
  781. if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING)
  782. jffs2_free_inode_cache(old);
  783. spin_unlock(&c->inocache_lock);
  784. }
  785. void jffs2_free_ino_caches(struct jffs2_sb_info *c)
  786. {
  787. int i;
  788. struct jffs2_inode_cache *this, *next;
  789. for (i=0; i<INOCACHE_HASHSIZE; i++) {
  790. this = c->inocache_list[i];
  791. while (this) {
  792. next = this->next;
  793. jffs2_xattr_free_inode(c, this);
  794. jffs2_free_inode_cache(this);
  795. this = next;
  796. }
  797. c->inocache_list[i] = NULL;
  798. }
  799. }
  800. void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
  801. {
  802. int i;
  803. struct jffs2_raw_node_ref *this, *next;
  804. for (i=0; i<c->nr_blocks; i++) {
  805. this = c->blocks[i].first_node;
  806. while (this) {
  807. if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE)
  808. next = this[REFS_PER_BLOCK].next_in_ino;
  809. else
  810. next = NULL;
  811. jffs2_free_refblock(this);
  812. this = next;
  813. }
  814. c->blocks[i].first_node = c->blocks[i].last_node = NULL;
  815. }
  816. }
  817. struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
  818. {
  819. /* The common case in lookup is that there will be a node
  820. which precisely matches. So we go looking for that first */
  821. struct rb_node *next;
  822. struct jffs2_node_frag *prev = NULL;
  823. struct jffs2_node_frag *frag = NULL;
  824. dbg_fragtree2("root %p, offset %d\n", fragtree, offset);
  825. next = fragtree->rb_node;
  826. while(next) {
  827. frag = rb_entry(next, struct jffs2_node_frag, rb);
  828. if (frag->ofs + frag->size <= offset) {
  829. /* Remember the closest smaller match on the way down */
  830. if (!prev || frag->ofs > prev->ofs)
  831. prev = frag;
  832. next = frag->rb.rb_right;
  833. } else if (frag->ofs > offset) {
  834. next = frag->rb.rb_left;
  835. } else {
  836. return frag;
  837. }
  838. }
  839. /* Exact match not found. Go back up looking at each parent,
  840. and return the closest smaller one */
  841. if (prev)
  842. dbg_fragtree2("no match. Returning frag %#04x-%#04x, closest previous\n",
  843. prev->ofs, prev->ofs+prev->size);
  844. else
  845. dbg_fragtree2("returning NULL, empty fragtree\n");
  846. return prev;
  847. }
  848. /* Pass 'c' argument to indicate that nodes should be marked obsolete as
  849. they're killed. */
/*
 * Destroy an entire fragtree, freeing each fragment and (when its last
 * fragment goes) the full_dnode it points to. With a non-NULL 'c', the
 * underlying raw node is also marked obsolete on the medium.
 *
 * The traversal is an iterative post-order walk that needs no stack:
 * descend to a leaf, free it, detach it from its parent, then continue
 * from the parent. rb colours are ignored — the tree is being destroyed,
 * not rebalanced.
 */
void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
{
	struct jffs2_node_frag *frag;
	struct jffs2_node_frag *parent;

	if (!root->rb_node)
		return;

	dbg_fragtree("killing\n");

	frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));
	while(frag) {
		/* Descend until 'frag' is a leaf; children are freed first */
		if (frag->rb.rb_left) {
			frag = frag_left(frag);
			continue;
		}
		if (frag->rb.rb_right) {
			frag = frag_right(frag);
			continue;
		}
		if (frag->node && !(--frag->node->frags)) {
			/* Not a hole, and it's the final remaining frag
			   of this node. Free the node */
			if (c)
				jffs2_mark_node_obsolete(c, frag->node->raw);
			jffs2_free_full_dnode(frag->node);
		}
		/* Detach the leaf from its parent before freeing it, so the
		   walk never revisits a freed child pointer */
		parent = frag_parent(frag);
		if (parent) {
			if (frag_left(parent) == frag)
				parent->rb.rb_left = NULL;
			else
				parent->rb.rb_right = NULL;
		}
		jffs2_free_node_frag(frag);
		/* Resume from the parent (NULL at the root ends the loop) */
		frag = parent;
		cond_resched();
	}
}
/*
 * Consume one of the raw_node_ref slots pre-allocated for 'jeb'
 * (jeb->allocated_refs) and link it in as the eraseblock's new last
 * node at flash position 'ofs' (which carries the REF_* state in its
 * low bits) with total length 'len'.
 *
 * Side effects:
 *  - threads the ref onto ic->nodes when 'ic' is non-NULL;
 *  - updates per-eraseblock and filesystem-wide space accounting
 *    (unchecked/used/dirty, and free in all cases) according to the
 *    ref's state bits.
 *
 * New refs must be strictly contiguous: either the first node of the
 * eraseblock, or starting exactly where the previous node ends — any
 * gap is a BUG(). NOTE(review): no locking is taken here; presumably
 * callers hold the appropriate locks — confirm against call sites.
 */
struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
					       struct jffs2_eraseblock *jeb,
					       uint32_t ofs, uint32_t len,
					       struct jffs2_inode_cache *ic)
{
	struct jffs2_raw_node_ref *ref;

	BUG_ON(!jeb->allocated_refs);
	jeb->allocated_refs--;

	ref = jeb->last_node;

	dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset,
		    ref->next_in_ino);

	/* Walk forward from last_node to the first empty slot, following
	   REF_LINK_NODE entries across refblock boundaries */
	while (ref->flash_offset != REF_EMPTY_NODE) {
		if (ref->flash_offset == REF_LINK_NODE)
			ref = ref->next_in_ino;
		else
			ref++;
	}

	dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref,
		    ref->flash_offset, ofs, ref->next_in_ino, len);

	ref->flash_offset = ofs;

	if (!jeb->first_node) {
		jeb->first_node = ref;
		BUG_ON(ref_offset(ref) != jeb->offset);
	} else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) {
		/* New node does not start where free space begins — the
		   node chain would have a hole or overlap in it */
		uint32_t last_len = ref_totlen(c, jeb, jeb->last_node);

		JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n",
			    ref, ref_offset(ref), ref_offset(ref)+len,
			    ref_offset(jeb->last_node),
			    ref_offset(jeb->last_node)+last_len);
		BUG();
	}
	jeb->last_node = ref;

	/* Thread onto the per-inode node list when an inocache was given */
	if (ic) {
		ref->next_in_ino = ic->nodes;
		ic->nodes = ref;
	} else {
		ref->next_in_ino = NULL;
	}

	/* Space accounting keyed off the state bits embedded in 'ofs' */
	switch(ref_flags(ref)) {
	case REF_UNCHECKED:
		c->unchecked_size += len;
		jeb->unchecked_size += len;
		break;

	case REF_NORMAL:
	case REF_PRISTINE:
		c->used_size += len;
		jeb->used_size += len;
		break;

	case REF_OBSOLETE:
		c->dirty_size += len;
		jeb->dirty_size += len;
		break;
	}
	c->free_size -= len;
	jeb->free_size -= len;

#ifdef TEST_TOTLEN
	/* Set (and test) __totlen field... for now */
	ref->__totlen = len;
	ref_totlen(c, jeb, ref);
#endif
	return ref;
}
  948. /* No locking, no reservation of 'ref'. Do not use on a live file system */
  949. int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
  950. uint32_t size)
  951. {
  952. if (!size)
  953. return 0;
  954. if (unlikely(size > jeb->free_size)) {
  955. printk(KERN_CRIT "Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n",
  956. size, jeb->free_size, jeb->wasted_size);
  957. BUG();
  958. }
  959. /* REF_EMPTY_NODE is !obsolete, so that works OK */
  960. if (jeb->last_node && ref_obsolete(jeb->last_node)) {
  961. #ifdef TEST_TOTLEN
  962. jeb->last_node->__totlen += size;
  963. #endif
  964. c->dirty_size += size;
  965. c->free_size -= size;
  966. jeb->dirty_size += size;
  967. jeb->free_size -= size;
  968. } else {
  969. uint32_t ofs = jeb->offset + c->sector_size - jeb->free_size;
  970. ofs |= REF_OBSOLETE;
  971. jffs2_link_node_ref(c, jeb, ofs, size, NULL);
  972. }
  973. return 0;
  974. }
  975. /* Calculate totlen from surrounding nodes or eraseblock */
  976. static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
  977. struct jffs2_eraseblock *jeb,
  978. struct jffs2_raw_node_ref *ref)
  979. {
  980. uint32_t ref_end;
  981. struct jffs2_raw_node_ref *next_ref = ref_next(ref);
  982. if (next_ref)
  983. ref_end = ref_offset(next_ref);
  984. else {
  985. if (!jeb)
  986. jeb = &c->blocks[ref->flash_offset / c->sector_size];
  987. /* Last node in block. Use free_space */
  988. if (unlikely(ref != jeb->last_node)) {
  989. printk(KERN_CRIT "ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
  990. ref, ref_offset(ref), jeb->last_node, jeb->last_node?ref_offset(jeb->last_node):0);
  991. BUG();
  992. }
  993. ref_end = jeb->offset + c->sector_size - jeb->free_size;
  994. }
  995. return ref_end - ref_offset(ref);
  996. }
/*
 * Public wrapper around __ref_totlen(). When TEST_TOTLEN is enabled,
 * cross-checks the computed length against the debug __totlen field
 * stored in the ref; on mismatch it dumps diagnostic state, warns, and
 * trusts the stored value instead of the computed one.
 */
uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
			    struct jffs2_raw_node_ref *ref)
{
	uint32_t ret;

	ret = __ref_totlen(c, jeb, ref);

#ifdef TEST_TOTLEN
	if (unlikely(ret != ref->__totlen)) {
		if (!jeb)
			jeb = &c->blocks[ref->flash_offset / c->sector_size];

		printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
		       ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
		       ret, ref->__totlen);

		if (ref_next(ref)) {
			printk(KERN_CRIT "next %p (0x%08x-0x%08x)\n", ref_next(ref), ref_offset(ref_next(ref)),
			       ref_offset(ref_next(ref))+ref->__totlen);
		} else
			printk(KERN_CRIT "No next ref. jeb->last_node is %p\n", jeb->last_node);

		printk(KERN_CRIT "jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size);

#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
		__jffs2_dbg_dump_node_refs_nolock(c, jeb);
#endif

		WARN_ON(1);

		/* Fall back to the recorded debug value on mismatch */
		ret = ref->__totlen;
	}
#endif /* TEST_TOTLEN */
	return ret;
}