/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: readinode.c,v 1.143 2005/11/07 11:14:41 gleixner Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include "nodelist.h"

/*
 * Put a new tmp_dnode_info into the temporary RB-tree, keyed by version with
 * higher versions to the left, so that an rb_first()/rb_next() walk visits
 * the nodes in order of decreasing version.
 */
static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
{
	struct rb_node **p = &list->rb_node;
	struct rb_node * parent = NULL;
	struct jffs2_tmp_dnode_info *this;

	while (*p) {
		parent = *p;
		this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);

		/* There may actually be a collision here, but it doesn't
		   actually matter. As long as the two nodes with the same
		   version are together, it's all fine. */
		if (tn->version > this->version)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&tn->rb, parent, p);
	rb_insert_color(&tn->rb, list);
}
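
/*
 * Free the whole temporary RB-tree of tmp_dnode_info structures. The tree is
 * dismantled bottom-up: descend to a leaf, free it, then clear the parent's
 * pointer to it by hand rather than paying for rb_erase()'s rebalancing,
 * since the tree is being thrown away anyway.
 */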
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct rb_node *this;
	struct jffs2_tmp_dnode_info *tn;

	this = list->rb_node;

	/* Now at bottom of tree */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
			jffs2_free_full_dnode(tn->fn);
			jffs2_free_tmp_dnode_info(tn);

			this = this->rb_parent;
			if (!this)
				break;

			if (this->rb_left == &tn->rb)
				this->rb_left = NULL;
			else if (this->rb_right == &tn->rb)
				this->rb_right = NULL;
			else BUG();
		}
	}
	list->rb_node = NULL;
}

static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *next;

	while (fd) {
		next = fd->next;
		jffs2_free_full_dirent(fd);
		fd = next;
	}
}

/* Returns first valid node after 'ref'. May return 'ref' */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
		ref = ref->next_in_ino;
	}
	return NULL;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
				struct jffs2_raw_dirent *rd, size_t read, struct jffs2_full_dirent **fdp,
				uint32_t *latest_mctime, uint32_t *mctime_ver)
{
	struct jffs2_full_dirent *fd;

	/* The direntry nodes are checked during the flash scanning */
	BUG_ON(ref_flags(ref) == REF_UNCHECKED);
	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	/* Sanity check */
	if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
		JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
			    ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
		return 1;
	}

	fd = jffs2_alloc_full_dirent(rd->nsize + 1);
	if (unlikely(!fd))
		return -ENOMEM;

	fd->raw = ref;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->type = rd->type;

	/* Pick out the mctime of the latest dirent */
	if (fd->version > *mctime_ver && je32_to_cpu(rd->mctime)) {
		*mctime_ver = fd->version;
		*latest_mctime = je32_to_cpu(rd->mctime);
	}

	/*
	 * Copy as much of the name as possible from the raw
	 * dirent we've already read from the flash.
	 */
	if (read > sizeof(*rd))
		memcpy(&fd->name[0], &rd->name[0],
		       min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));

	/* Do we need to copy any more of the name directly from the flash? */
	if (rd->nsize + sizeof(*rd) > read) {
		/* FIXME: point() */
		int err;
		int already = read - sizeof(*rd);

		err = jffs2_flash_read(c, (ref_offset(ref)) + read,
				       rd->nsize - already, &read, &fd->name[already]);
		if (unlikely(read != rd->nsize - already) && likely(!err)) {
			/* Short read: don't leak the half-built dirent */
			jffs2_free_full_dirent(fd);
			return -EIO;
		}

		if (unlikely(err)) {
			JFFS2_ERROR("read remainder of name: error %d\n", err);
			jffs2_free_full_dirent(fd);
			return -EIO;
		}
	}

	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->next = NULL;
	fd->name[rd->nsize] = '\0';

	/*
	 * Wheee. We now have a complete jffs2_full_dirent structure, with
	 * the name in it and everything. Link it into the list
	 */
	jffs2_add_fd_to_list(c, fd, fdp);

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
			     struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen,
			     uint32_t *latest_mctime, uint32_t *mctime_ver)
{
	struct jffs2_tmp_dnode_info *tn;
	uint32_t len, csize;
	int ret = 1;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn));
		return -ENOMEM;
	}

	tn->partial_crc = 0;
	csize = je32_to_cpu(rd->csize);

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {
		uint32_t crc;
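
		/*
		 * The node CRC covers the raw inode up to, but not including,
		 * the trailing data_crc and node_crc words: hence the
		 * "sizeof(*rd) - 8" below.
		 */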
		crc = crc32(0, rd, sizeof(*rd) - 8);
		if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
			JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
				     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
			goto free_out;
		}

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			goto free_out;
		}

		if (jffs2_is_writebuffered(c) && csize != 0) {
			/* At this point we are supposed to check the data CRC
			 * of our unchecked node. But thus far, we do not
			 * know whether the node is valid or obsolete. To
			 * figure this out, we need to walk all the nodes of
			 * the inode and build the inode fragtree. We don't
			 * want to spend time checking data of nodes which may
			 * later be found to be obsolete. So we put off the full
			 * data CRC checking until we have read all the inode
			 * nodes and have started building the fragtree.
			 *
			 * The fragtree is being built starting with nodes
			 * having the highest version number, so we'll be able
			 * to detect whether a node is valid (i.e., it is not
			 * overlapped by a node with higher version) or not.
			 * And we'll be able to check only those nodes, which
			 * are not obsolete.
			 *
			 * Of course, this optimization only makes sense in case
			 * of NAND flashes (or other flashes with
			 * !jffs2_can_mark_obsolete()), since on NOR flashes
			 * nodes are marked obsolete physically.
			 *
			 * Since NAND flashes (or other flashes with
			 * jffs2_is_writebuffered(c)) are anyway read by
			 * fractions of c->wbuf_pagesize, and we have just read
			 * the node header, it is likely that the starting part
			 * of the node data is also read when we read the
			 * header. So we don't mind checking the CRC of the
			 * starting part of the data of the node now, and check
			 * the second part later (in jffs2_check_node_data()).
			 * Of course, we will not need to re-read and re-check
			 * the NAND page which we have just read. This is why we
			 * read the whole NAND page at jffs2_get_inode_nodes(),
			 * while we needed only the node header.
			 */
			unsigned char *buf;

			/* 'buf' will point to the start of data */
			buf = (unsigned char *)rd + sizeof(*rd);
			/* len will be the read data length */
			len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
			tn->partial_crc = crc32(0, buf, len);

			dbg_readinode("calculated CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);

			/* If we actually calculated the whole data CRC
			 * and it is wrong, drop the node. */
			if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
				JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
					     ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
				goto free_out;
			}

		} else if (csize == 0) {
			/*
			 * We checked the header CRC. If the node has no data, adjust
			 * the space accounting now. For other nodes this will be done
			 * later either when the node is marked obsolete or when its
			 * data is checked.
			 */
			struct jffs2_eraseblock *jeb;

			dbg_readinode("the node has no data.\n");
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
			len = ref_totlen(c, jeb, ref);

			spin_lock(&c->erase_completion_lock);
			jeb->used_size += len;
			jeb->unchecked_size -= len;
			c->used_size += len;
			c->unchecked_size -= len;
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
			spin_unlock(&c->erase_completion_lock);
		}
	}
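
	/*
	 * The header looks sane (checked either above or back at scan time),
	 * so build the tmp_dnode_info and the full_dnode it carries.
	 */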
	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		ret = -ENOMEM;
		goto free_out;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->data_crc = je32_to_cpu(rd->data_crc);
	tn->csize = csize;
	tn->fn->raw = ref;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
		tn->fn->size = csize;
	else // normal case...
		tn->fn->size = je32_to_cpu(rd->dsize);

	dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
		      ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);

	jffs2_add_tn_to_tree(tn, tnp);

	return 0;

free_out:
	jffs2_free_tmp_dnode_info(tn);
	return ret;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an unknown node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
	/* We don't mark unknown nodes as REF_UNCHECKED */
	BUG_ON(ref_flags(ref) == REF_UNCHECKED);
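
	/* Make sure the ACCURATE bit is set in our in-core copy of the node
	   type before we inspect its compatibility bits below. */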
	un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));

	if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) {
		/* Hmmm. This should have been caught at scan time. */
		JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref));
		jffs2_dbg_dump_node(c, ref_offset(ref));
		return 1;
	} else {
		switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {

		case JFFS2_FEATURE_INCOMPAT:
			JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
				    je16_to_cpu(un->nodetype), ref_offset(ref));
			/* EEP */
			BUG();
			break;

		case JFFS2_FEATURE_ROCOMPAT:
			JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
				    je16_to_cpu(un->nodetype), ref_offset(ref));
			BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
			break;

		case JFFS2_FEATURE_RWCOMPAT_COPY:
			JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
				     je16_to_cpu(un->nodetype), ref_offset(ref));
			break;

		case JFFS2_FEATURE_RWCOMPAT_DELETE:
			JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
				     je16_to_cpu(un->nodetype), ref_offset(ref));
			return 1;
		}
	}

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It detects whether more data should be read and reads it if so.
 *
 * Returns: 0 on success;
 *	    negative error code on failure.
 */
static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
		     int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart)
{
	int right_len, err, len;
	size_t retlen;
	uint32_t offs;
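
	/*
	 * On write-buffered flash we always read whole wbuf pages: the target
	 * length is the rest of the page 'bufstart' lies in, plus one more
	 * page if the node spills over into the next one. Otherwise we read
	 * exactly 'right_size' bytes.
	 */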
	if (jffs2_is_writebuffered(c)) {
		right_len = c->wbuf_pagesize - (bufstart - buf);
		if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize)
			right_len += c->wbuf_pagesize;
	} else
		right_len = right_size;

	if (*rdlen == right_len)
		return 0;

	/* We need to read more data */
	offs = ref_offset(ref) + *rdlen;
	if (jffs2_is_writebuffered(c)) {
		bufstart = buf + c->wbuf_pagesize;
		len = c->wbuf_pagesize;
	} else {
		bufstart = buf + *rdlen;
		len = right_size - *rdlen;
	}

	dbg_readinode("read more %d bytes\n", len);

	err = jffs2_flash_read(c, offs, len, &retlen, bufstart);
	if (err) {
		JFFS2_ERROR("can not read %d bytes from 0x%08x, "
			    "error code: %d.\n", len, offs, err);
		return err;
	}

	if (retlen < len) {
		JFFS2_ERROR("short read at %#08x: %d instead of %d.\n",
			    offs, retlen, len);
		return -EIO;
	}

	*rdlen = right_len;
	return 0;
}

/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino, returning the former in order of version */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
				 uint32_t *highest_version, uint32_t *latest_mctime,
				 uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *ret_fd = NULL;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	*mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	if (jffs2_is_writebuffered(c)) {
		/*
		 * If we have the write buffer, we assume the minimal I/O unit
		 * is c->wbuf_pagesize. We implement some optimizations in this
		 * case and need a temporary buffer of 2*c->wbuf_pagesize bytes
		 * (see comments in read_dnode()). Basically, we want to read
		 * not only the node header, but the whole wbuf (NAND page in
		 * case of NAND), or two of them if the node header overlaps
		 * the border between the two wbufs.
		 */
		len = 2*c->wbuf_pagesize;
	} else {
		/*
		 * When there is no write buffer, the size of the temporary
		 * buffer is the size of the largest node header.
		 */
		len = sizeof(union jffs2_node_union);
	}

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);

	while (valid_ref) {
		unsigned char *bufstart;

		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the node has
		 * size = JFFS2_MIN_NODE_HEADER.
		 */
		if (jffs2_is_writebuffered(c)) {
			/*
			 * We treat 'buf' as 2 adjacent wbufs. We want to
			 * adjust bufstart so that it points to the
			 * beginning of the node within this wbuf.
			 */
			bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize);
			/* We will read either one wbuf or 2 wbufs. */
			len = c->wbuf_pagesize - (bufstart - buf);
			if (JFFS2_MIN_NODE_HEADER + (int)(bufstart - buf) > c->wbuf_pagesize) {
				/* The header spans the border of the first wbuf */
				len += c->wbuf_pagesize;
			}
		} else {
			bufstart = buf;
			len = JFFS2_MIN_NODE_HEADER;
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len,
				       &retlen, bufstart);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)bufstart;
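
		/*
		 * Every JFFS2 node starts with the same unknown-node header,
		 * so dispatch on the node type. For dirent and inode nodes we
		 * may only have JFFS2_MIN_NODE_HEADER bytes so far and must
		 * pull in the rest of the type-specific header via
		 * read_more() before parsing it.
		 */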
		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->d.version) > *highest_version)
				*highest_version = je32_to_cpu(node->d.version);

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->i.version) > *highest_version)
				*highest_version = je32_to_cpu(node->i.version);

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;
		}
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;
	kfree(buf);

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
		      f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver);
	return 0;

free_out:
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	kfree(buf);
	return err;
}
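
/*
 * Build the in-core state for an inode: gather its nodes, construct the
 * fragtree and dirent list, re-read and validate the most recent inode node,
 * and apply the mode-specific fix-ups (directory mctime, regular-file
 * truncation, symlink target caching, device metadata node).
 */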
static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
					struct jffs2_inode_info *f,
					struct jffs2_raw_inode *latest_node)
{
	struct jffs2_tmp_dnode_info *tn;
	struct rb_root tn_list;
	struct rb_node *rb, *repl_rb;
	struct jffs2_full_dirent *fd_list;
	struct jffs2_full_dnode *fn, *first_fn = NULL;
	uint32_t crc;
	uint32_t latest_mctime, mctime_ver;
	size_t retlen;
	int ret;

	dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);

	/* Grab all nodes relevant to this ino */
	ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);

	if (ret) {
		JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		return ret;
	}
	f->dents = fd_list;

	rb = rb_first(&tn_list);
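
	/*
	 * The temporary tree is keyed so that rb_first()/rb_next() walk the
	 * dnodes from the highest version downwards; each one is merged into
	 * the fragtree as an "older" fragment while the tree is dismantled in
	 * place behind us.
	 */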
	while (rb) {
		cond_resched();
		tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
		fn = tn->fn;
		ret = 1;
		dbg_readinode("consider node ver %u, phys offset "
			      "%#08x(%d), range %u-%u.\n", tn->version,
			      ref_offset(fn->raw), ref_flags(fn->raw),
			      fn->ofs, fn->ofs + fn->size);

		if (fn->size) {
			ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
			/* TODO: the error code isn't checked, check it */
			jffs2_dbg_fragtree_paranoia_check_nolock(f);
			BUG_ON(ret < 0);
			if (!first_fn && ret == 0)
				first_fn = fn;
		} else if (!first_fn) {
			first_fn = fn;
			f->metadata = fn;
			ret = 0; /* Prevent freeing the metadata update node */
		} else
			jffs2_mark_node_obsolete(c, fn->raw);

		BUG_ON(rb->rb_left);
		if (rb->rb_parent && rb->rb_parent->rb_left == rb) {
			/* We were the left-hand child of our parent. We need
			 * to move our own right-hand child into our place. */
			repl_rb = rb->rb_right;
			if (repl_rb)
				repl_rb->rb_parent = rb->rb_parent;
		} else
			repl_rb = NULL;

		rb = rb_next(rb);

		/* Remove the spent tn from the tree; don't bother rebalancing
		 * but put our right-hand child in our own place. */
		if (tn->rb.rb_parent) {
			if (tn->rb.rb_parent->rb_left == &tn->rb)
				tn->rb.rb_parent->rb_left = repl_rb;
			else if (tn->rb.rb_parent->rb_right == &tn->rb)
				tn->rb.rb_parent->rb_right = repl_rb;
			else BUG();
		} else if (tn->rb.rb_right)
			tn->rb.rb_right->rb_parent = NULL;

		jffs2_free_tmp_dnode_info(tn);
		if (ret) {
			dbg_readinode("delete dnode %u-%u.\n",
				      fn->ofs, fn->ofs + fn->size);
			jffs2_free_full_dnode(fn);
		}
	}

	jffs2_dbg_fragtree_paranoia_check_nolock(f);

	BUG_ON(first_fn && ref_obsolete(first_fn->raw));

	fn = first_fn;
	if (unlikely(!first_fn)) {
		/* No data nodes for this inode. */
		if (f->inocache->ino != 1) {
			JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
			if (!fd_list) {
				if (f->inocache->state == INO_STATE_READING)
					jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
				return -EIO;
			}
			JFFS2_NOTICE("but it has children so we fake some modes for it\n");
		}
		latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
		latest_node->version = cpu_to_je32(0);
		latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
		latest_node->isize = cpu_to_je32(0);
		latest_node->gid = cpu_to_je16(0);
		latest_node->uid = cpu_to_je16(0);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
		return 0;
	}
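
	/*
	 * first_fn is the newest node that was accepted above (data added to
	 * the fragtree, or the metadata node); re-read its raw inode header
	 * from flash to get the authoritative metadata for this inode.
	 */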
	ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
	if (ret || retlen != sizeof(*latest_node)) {
		JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
			    ret, retlen, sizeof(*latest_node));
		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return ret?ret:-EIO;
	}

	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
	if (crc != je32_to_cpu(latest_node->node_crc)) {
		JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
			    f->inocache->ino, ref_offset(fn->raw));
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return -EIO;
	}
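
	/* The latest node checks out; apply mode-specific handling. */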
	switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
	case S_IFDIR:
		if (mctime_ver > je32_to_cpu(latest_node->version)) {
			/* The times in the latest_node are actually older than
			   mctime in the latest dirent. Cheat. */
			latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
		}
		break;

	case S_IFREG:
		/* If it was a regular file, truncate it to the latest node's isize */
		jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
		break;

	case S_IFLNK:
		/* Hack to work around broken isize in old symlink code.
		   Remove this when dwmw2 comes to his senses and stops
		   symlinks from being an entirely gratuitous special
		   case. */
		if (!je32_to_cpu(latest_node->isize))
			latest_node->isize = latest_node->dsize;

		if (f->inocache->state != INO_STATE_CHECKING) {
			/* The symlink's inode data is the target path. Read it and
			 * keep it in RAM to make follow-symlink operations fast. */
			f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
			if (!f->target) {
				JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENOMEM;
			}

			ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
					       je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);

			if (ret || retlen != je32_to_cpu(latest_node->csize)) {
				if (retlen != je32_to_cpu(latest_node->csize))
					ret = -EIO;
				kfree(f->target);
				f->target = NULL;
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return ret;
			}

			f->target[je32_to_cpu(latest_node->csize)] = '\0';
			dbg_readinode("symlink's target '%s' cached\n", f->target);
		}

		/* fall through... */

	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: f->fraglist != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}

	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}

/* Scan the list of all nodes present for this ino, build map of versions, etc. */
int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			uint32_t ino, struct jffs2_raw_inode *latest_node)
{
	dbg_readinode("read inode #%u\n", ino);

 retry_inocache:
	spin_lock(&c->inocache_lock);
	f->inocache = jffs2_get_ino_cache(c, ino);

	if (f->inocache) {
		/* Check its state. We may need to wait before we can use it */
		switch(f->inocache->state) {
		case INO_STATE_UNCHECKED:
		case INO_STATE_CHECKEDABSENT:
			f->inocache->state = INO_STATE_READING;
			break;

		case INO_STATE_CHECKING:
		case INO_STATE_GC:
			/* If it's in either of these states, we need
			   to wait for whoever's got it to finish and
			   put it back. */
			dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			goto retry_inocache;

		case INO_STATE_READING:
		case INO_STATE_PRESENT:
			/* Eep. This should never happen. It can
			   happen if Linux calls read_inode() again
			   before clear_inode() has finished though. */
			JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
			/* Fail. That's probably better than allowing it to succeed */
			f->inocache = NULL;
			break;

		default:
			BUG();
		}
	}
	spin_unlock(&c->inocache_lock);

	if (!f->inocache && ino == 1) {
		/* Special case - no root inode on medium */
		f->inocache = jffs2_alloc_inode_cache();
		if (!f->inocache) {
			JFFS2_ERROR("cannot allocate inocache for root inode\n");
			return -ENOMEM;
		}
		dbg_readinode("creating inocache for root inode\n");
		memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
		f->inocache->ino = f->inocache->nlink = 1;
		f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
		f->inocache->state = INO_STATE_READING;
		jffs2_add_ino_cache(c, f->inocache);
	}
	if (!f->inocache) {
		JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino);
		return -ENOENT;
	}

	return jffs2_do_read_inode_internal(c, f, latest_node);
}
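
/*
 * CRC-check an inode's nodes without instantiating a full VFS inode: build
 * the in-core state in a throwaway jffs2_inode_info (which forces the data
 * CRC checks in the read path), then tear it all down again.
 */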
int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	struct jffs2_raw_inode n;
	struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;

	memset(f, 0, sizeof(*f));
	init_MUTEX_LOCKED(&f->sem);
	f->inocache = ic;

	ret = jffs2_do_read_inode_internal(c, f, &n);
	if (!ret) {
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
	}
	kfree(f);
	return ret;
}
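
/*
 * Release the in-core state for an inode: obsolete its nodes if the inode has
 * been deleted (nlink == 0), free the metadata node, fragtree, symlink target
 * and dirent list, and update the inocache state.
 */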
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
	struct jffs2_full_dirent *fd, *fds;
	int deleted;

	down(&f->sem);
	deleted = f->inocache && !f->inocache->nlink;

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);

	if (f->metadata) {
		if (deleted)
			jffs2_mark_node_obsolete(c, f->metadata->raw);
		jffs2_free_full_dnode(f->metadata);
	}

	jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);

	if (f->target) {
		kfree(f->target);
		f->target = NULL;
	}

	fds = f->dents;
	while (fds) {
		fd = fds;
		fds = fd->next;
		jffs2_free_full_dirent(fd);
	}

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		if (f->inocache->nodes == (void *)f->inocache)
			jffs2_del_ino_cache(c, f->inocache);
	}

	up(&f->sem);
}