debug.c 62 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298
  1. /*
  2. * This file is part of UBIFS.
  3. *
  4. * Copyright (C) 2006-2008 Nokia Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program; if not, write to the Free Software Foundation, Inc., 51
  17. * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  18. *
  19. * Authors: Artem Bityutskiy (Битюцкий Артём)
  20. * Adrian Hunter
  21. */
  22. /*
  23. * This file implements most of the debugging stuff which is compiled in only
  24. * when it is enabled. But some debugging check functions are implemented in
  25. * corresponding subsystem, just because they are closely related and utilize
  26. * various local functions of those subsystems.
  27. */
  28. #define UBIFS_DBG_PRESERVE_UBI
  29. #include "ubifs.h"
  30. #include <linux/module.h>
  31. #include <linux/moduleparam.h>
  32. #ifdef CONFIG_UBIFS_FS_DEBUG
  33. DEFINE_SPINLOCK(dbg_lock);
  34. static char dbg_key_buf0[128];
  35. static char dbg_key_buf1[128];
  36. unsigned int ubifs_msg_flags = UBIFS_MSG_FLAGS_DEFAULT;
  37. unsigned int ubifs_chk_flags = UBIFS_CHK_FLAGS_DEFAULT;
  38. unsigned int ubifs_tst_flags;
  39. module_param_named(debug_msgs, ubifs_msg_flags, uint, S_IRUGO | S_IWUSR);
  40. module_param_named(debug_chks, ubifs_chk_flags, uint, S_IRUGO | S_IWUSR);
  41. module_param_named(debug_tsts, ubifs_tst_flags, uint, S_IRUGO | S_IWUSR);
  42. MODULE_PARM_DESC(debug_msgs, "Debug message type flags");
  43. MODULE_PARM_DESC(debug_chks, "Debug check flags");
  44. MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
  45. static const char *get_key_fmt(int fmt)
  46. {
  47. switch (fmt) {
  48. case UBIFS_SIMPLE_KEY_FMT:
  49. return "simple";
  50. default:
  51. return "unknown/invalid format";
  52. }
  53. }
  54. static const char *get_key_hash(int hash)
  55. {
  56. switch (hash) {
  57. case UBIFS_KEY_HASH_R5:
  58. return "R5";
  59. case UBIFS_KEY_HASH_TEST:
  60. return "test";
  61. default:
  62. return "unknown/invalid name hash";
  63. }
  64. }
  65. static const char *get_key_type(int type)
  66. {
  67. switch (type) {
  68. case UBIFS_INO_KEY:
  69. return "inode";
  70. case UBIFS_DENT_KEY:
  71. return "direntry";
  72. case UBIFS_XENT_KEY:
  73. return "xentry";
  74. case UBIFS_DATA_KEY:
  75. return "data";
  76. case UBIFS_TRUN_KEY:
  77. return "truncate";
  78. default:
  79. return "unknown/invalid key";
  80. }
  81. }
  82. static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
  83. char *buffer)
  84. {
  85. char *p = buffer;
  86. int type = key_type(c, key);
  87. if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
  88. switch (type) {
  89. case UBIFS_INO_KEY:
  90. sprintf(p, "(%lu, %s)", key_inum(c, key),
  91. get_key_type(type));
  92. break;
  93. case UBIFS_DENT_KEY:
  94. case UBIFS_XENT_KEY:
  95. sprintf(p, "(%lu, %s, %#08x)", key_inum(c, key),
  96. get_key_type(type), key_hash(c, key));
  97. break;
  98. case UBIFS_DATA_KEY:
  99. sprintf(p, "(%lu, %s, %u)", key_inum(c, key),
  100. get_key_type(type), key_block(c, key));
  101. break;
  102. case UBIFS_TRUN_KEY:
  103. sprintf(p, "(%lu, %s)",
  104. key_inum(c, key), get_key_type(type));
  105. break;
  106. default:
  107. sprintf(p, "(bad key type: %#08x, %#08x)",
  108. key->u32[0], key->u32[1]);
  109. }
  110. } else
  111. sprintf(p, "bad key format %d", c->key_fmt);
  112. }
  113. const char *dbg_key_str0(const struct ubifs_info *c, const union ubifs_key *key)
  114. {
  115. /* dbg_lock must be held */
  116. sprintf_key(c, key, dbg_key_buf0);
  117. return dbg_key_buf0;
  118. }
  119. const char *dbg_key_str1(const struct ubifs_info *c, const union ubifs_key *key)
  120. {
  121. /* dbg_lock must be held */
  122. sprintf_key(c, key, dbg_key_buf1);
  123. return dbg_key_buf1;
  124. }
  125. const char *dbg_ntype(int type)
  126. {
  127. switch (type) {
  128. case UBIFS_PAD_NODE:
  129. return "padding node";
  130. case UBIFS_SB_NODE:
  131. return "superblock node";
  132. case UBIFS_MST_NODE:
  133. return "master node";
  134. case UBIFS_REF_NODE:
  135. return "reference node";
  136. case UBIFS_INO_NODE:
  137. return "inode node";
  138. case UBIFS_DENT_NODE:
  139. return "direntry node";
  140. case UBIFS_XENT_NODE:
  141. return "xentry node";
  142. case UBIFS_DATA_NODE:
  143. return "data node";
  144. case UBIFS_TRUN_NODE:
  145. return "truncate node";
  146. case UBIFS_IDX_NODE:
  147. return "indexing node";
  148. case UBIFS_CS_NODE:
  149. return "commit start node";
  150. case UBIFS_ORPH_NODE:
  151. return "orphan node";
  152. default:
  153. return "unknown node";
  154. }
  155. }
  156. static const char *dbg_gtype(int type)
  157. {
  158. switch (type) {
  159. case UBIFS_NO_NODE_GROUP:
  160. return "no node group";
  161. case UBIFS_IN_NODE_GROUP:
  162. return "in node group";
  163. case UBIFS_LAST_OF_NODE_GROUP:
  164. return "last of node group";
  165. default:
  166. return "unknown";
  167. }
  168. }
  169. const char *dbg_cstate(int cmt_state)
  170. {
  171. switch (cmt_state) {
  172. case COMMIT_RESTING:
  173. return "commit resting";
  174. case COMMIT_BACKGROUND:
  175. return "background commit requested";
  176. case COMMIT_REQUIRED:
  177. return "commit required";
  178. case COMMIT_RUNNING_BACKGROUND:
  179. return "BACKGROUND commit running";
  180. case COMMIT_RUNNING_REQUIRED:
  181. return "commit running and required";
  182. case COMMIT_BROKEN:
  183. return "broken commit";
  184. default:
  185. return "unknown commit state";
  186. }
  187. }
/**
 * dump_ch - dump the common header of an on-flash node.
 * @ch: common header to dump
 *
 * Prints each header field (magic, CRC, node/group type with their symbolic
 * names, sequence number, length) on its own KERN_DEBUG line. Fields are
 * little-endian on flash, hence the le32/le64_to_cpu conversions.
 */
static void dump_ch(const struct ubifs_ch *ch)
{
	printk(KERN_DEBUG "\tmagic %#x\n", le32_to_cpu(ch->magic));
	printk(KERN_DEBUG "\tcrc %#x\n", le32_to_cpu(ch->crc));
	printk(KERN_DEBUG "\tnode_type %d (%s)\n", ch->node_type,
	       dbg_ntype(ch->node_type));
	printk(KERN_DEBUG "\tgroup_type %d (%s)\n", ch->group_type,
	       dbg_gtype(ch->group_type));
	printk(KERN_DEBUG "\tsqnum %llu\n",
	       (unsigned long long)le64_to_cpu(ch->sqnum));
	printk(KERN_DEBUG "\tlen %u\n", le32_to_cpu(ch->len));
}
  200. void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode)
  201. {
  202. const struct ubifs_inode *ui = ubifs_inode(inode);
  203. printk(KERN_DEBUG "Dump in-memory inode:");
  204. printk(KERN_DEBUG "\tinode %lu\n", inode->i_ino);
  205. printk(KERN_DEBUG "\tsize %llu\n",
  206. (unsigned long long)i_size_read(inode));
  207. printk(KERN_DEBUG "\tnlink %u\n", inode->i_nlink);
  208. printk(KERN_DEBUG "\tuid %u\n", (unsigned int)inode->i_uid);
  209. printk(KERN_DEBUG "\tgid %u\n", (unsigned int)inode->i_gid);
  210. printk(KERN_DEBUG "\tatime %u.%u\n",
  211. (unsigned int)inode->i_atime.tv_sec,
  212. (unsigned int)inode->i_atime.tv_nsec);
  213. printk(KERN_DEBUG "\tmtime %u.%u\n",
  214. (unsigned int)inode->i_mtime.tv_sec,
  215. (unsigned int)inode->i_mtime.tv_nsec);
  216. printk(KERN_DEBUG "\tctime %u.%u\n",
  217. (unsigned int)inode->i_ctime.tv_sec,
  218. (unsigned int)inode->i_ctime.tv_nsec);
  219. printk(KERN_DEBUG "\tcreat_sqnum %llu\n", ui->creat_sqnum);
  220. printk(KERN_DEBUG "\txattr_size %u\n", ui->xattr_size);
  221. printk(KERN_DEBUG "\txattr_cnt %u\n", ui->xattr_cnt);
  222. printk(KERN_DEBUG "\txattr_names %u\n", ui->xattr_names);
  223. printk(KERN_DEBUG "\tdirty %u\n", ui->dirty);
  224. printk(KERN_DEBUG "\txattr %u\n", ui->xattr);
  225. printk(KERN_DEBUG "\tbulk_read %u\n", ui->xattr);
  226. printk(KERN_DEBUG "\tsynced_i_size %llu\n",
  227. (unsigned long long)ui->synced_i_size);
  228. printk(KERN_DEBUG "\tui_size %llu\n",
  229. (unsigned long long)ui->ui_size);
  230. printk(KERN_DEBUG "\tflags %d\n", ui->flags);
  231. printk(KERN_DEBUG "\tcompr_type %d\n", ui->compr_type);
  232. printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read);
  233. printk(KERN_DEBUG "\tread_in_a_row %lu\n", ui->read_in_a_row);
  234. printk(KERN_DEBUG "\tdata_len %d\n", ui->data_len);
  235. }
/**
 * dbg_dump_node - dump a UBIFS on-flash node.
 * @c: UBIFS file-system description object
 * @node: the node to dump
 *
 * Dumps the common header and then the type-specific payload of the node.
 * If the node magic is wrong, only a hexdump of the first UBIFS_CH_SZ bytes
 * is printed. The whole dump runs under dbg_lock because the DBGKEY()
 * helpers format into shared static buffers.
 */
void dbg_dump_node(const struct ubifs_info *c, const void *node)
{
	int i, n;
	union ubifs_key key;
	const struct ubifs_ch *ch = node;

	if (dbg_failure_mode)
		return;

	/* If the magic is incorrect, just hexdump the first bytes */
	if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) {
		printk(KERN_DEBUG "Not a node, first %zu bytes:", UBIFS_CH_SZ);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
			       (void *)node, UBIFS_CH_SZ, 1);
		return;
	}

	spin_lock(&dbg_lock);
	dump_ch(node);

	switch (ch->node_type) {
	case UBIFS_PAD_NODE:
	{
		const struct ubifs_pad_node *pad = node;

		printk(KERN_DEBUG "\tpad_len %u\n",
		       le32_to_cpu(pad->pad_len));
		break;
	}
	case UBIFS_SB_NODE:
	{
		const struct ubifs_sb_node *sup = node;
		unsigned int sup_flags = le32_to_cpu(sup->flags);

		printk(KERN_DEBUG "\tkey_hash %d (%s)\n",
		       (int)sup->key_hash, get_key_hash(sup->key_hash));
		printk(KERN_DEBUG "\tkey_fmt %d (%s)\n",
		       (int)sup->key_fmt, get_key_fmt(sup->key_fmt));
		printk(KERN_DEBUG "\tflags %#x\n", sup_flags);
		printk(KERN_DEBUG "\t big_lpt %u\n",
		       !!(sup_flags & UBIFS_FLG_BIGLPT));
		printk(KERN_DEBUG "\tmin_io_size %u\n",
		       le32_to_cpu(sup->min_io_size));
		printk(KERN_DEBUG "\tleb_size %u\n",
		       le32_to_cpu(sup->leb_size));
		printk(KERN_DEBUG "\tleb_cnt %u\n",
		       le32_to_cpu(sup->leb_cnt));
		printk(KERN_DEBUG "\tmax_leb_cnt %u\n",
		       le32_to_cpu(sup->max_leb_cnt));
		printk(KERN_DEBUG "\tmax_bud_bytes %llu\n",
		       (unsigned long long)le64_to_cpu(sup->max_bud_bytes));
		printk(KERN_DEBUG "\tlog_lebs %u\n",
		       le32_to_cpu(sup->log_lebs));
		printk(KERN_DEBUG "\tlpt_lebs %u\n",
		       le32_to_cpu(sup->lpt_lebs));
		printk(KERN_DEBUG "\torph_lebs %u\n",
		       le32_to_cpu(sup->orph_lebs));
		printk(KERN_DEBUG "\tjhead_cnt %u\n",
		       le32_to_cpu(sup->jhead_cnt));
		printk(KERN_DEBUG "\tfanout %u\n",
		       le32_to_cpu(sup->fanout));
		printk(KERN_DEBUG "\tlsave_cnt %u\n",
		       le32_to_cpu(sup->lsave_cnt));
		printk(KERN_DEBUG "\tdefault_compr %u\n",
		       (int)le16_to_cpu(sup->default_compr));
		printk(KERN_DEBUG "\trp_size %llu\n",
		       (unsigned long long)le64_to_cpu(sup->rp_size));
		printk(KERN_DEBUG "\trp_uid %u\n",
		       le32_to_cpu(sup->rp_uid));
		printk(KERN_DEBUG "\trp_gid %u\n",
		       le32_to_cpu(sup->rp_gid));
		printk(KERN_DEBUG "\tfmt_version %u\n",
		       le32_to_cpu(sup->fmt_version));
		printk(KERN_DEBUG "\ttime_gran %u\n",
		       le32_to_cpu(sup->time_gran));
		printk(KERN_DEBUG "\tUUID %02X%02X%02X%02X-%02X%02X"
		       "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X\n",
		       sup->uuid[0], sup->uuid[1], sup->uuid[2], sup->uuid[3],
		       sup->uuid[4], sup->uuid[5], sup->uuid[6], sup->uuid[7],
		       sup->uuid[8], sup->uuid[9], sup->uuid[10], sup->uuid[11],
		       sup->uuid[12], sup->uuid[13], sup->uuid[14],
		       sup->uuid[15]);
		break;
	}
	case UBIFS_MST_NODE:
	{
		const struct ubifs_mst_node *mst = node;

		printk(KERN_DEBUG "\thighest_inum %llu\n",
		       (unsigned long long)le64_to_cpu(mst->highest_inum));
		printk(KERN_DEBUG "\tcommit number %llu\n",
		       (unsigned long long)le64_to_cpu(mst->cmt_no));
		printk(KERN_DEBUG "\tflags %#x\n",
		       le32_to_cpu(mst->flags));
		printk(KERN_DEBUG "\tlog_lnum %u\n",
		       le32_to_cpu(mst->log_lnum));
		printk(KERN_DEBUG "\troot_lnum %u\n",
		       le32_to_cpu(mst->root_lnum));
		printk(KERN_DEBUG "\troot_offs %u\n",
		       le32_to_cpu(mst->root_offs));
		printk(KERN_DEBUG "\troot_len %u\n",
		       le32_to_cpu(mst->root_len));
		printk(KERN_DEBUG "\tgc_lnum %u\n",
		       le32_to_cpu(mst->gc_lnum));
		printk(KERN_DEBUG "\tihead_lnum %u\n",
		       le32_to_cpu(mst->ihead_lnum));
		printk(KERN_DEBUG "\tihead_offs %u\n",
		       le32_to_cpu(mst->ihead_offs));
		printk(KERN_DEBUG "\tindex_size %u\n",
		       le32_to_cpu(mst->index_size));
		printk(KERN_DEBUG "\tlpt_lnum %u\n",
		       le32_to_cpu(mst->lpt_lnum));
		printk(KERN_DEBUG "\tlpt_offs %u\n",
		       le32_to_cpu(mst->lpt_offs));
		printk(KERN_DEBUG "\tnhead_lnum %u\n",
		       le32_to_cpu(mst->nhead_lnum));
		printk(KERN_DEBUG "\tnhead_offs %u\n",
		       le32_to_cpu(mst->nhead_offs));
		printk(KERN_DEBUG "\tltab_lnum %u\n",
		       le32_to_cpu(mst->ltab_lnum));
		printk(KERN_DEBUG "\tltab_offs %u\n",
		       le32_to_cpu(mst->ltab_offs));
		printk(KERN_DEBUG "\tlsave_lnum %u\n",
		       le32_to_cpu(mst->lsave_lnum));
		printk(KERN_DEBUG "\tlsave_offs %u\n",
		       le32_to_cpu(mst->lsave_offs));
		printk(KERN_DEBUG "\tlscan_lnum %u\n",
		       le32_to_cpu(mst->lscan_lnum));
		printk(KERN_DEBUG "\tleb_cnt %u\n",
		       le32_to_cpu(mst->leb_cnt));
		printk(KERN_DEBUG "\tempty_lebs %u\n",
		       le32_to_cpu(mst->empty_lebs));
		printk(KERN_DEBUG "\tidx_lebs %u\n",
		       le32_to_cpu(mst->idx_lebs));
		printk(KERN_DEBUG "\ttotal_free %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_free));
		printk(KERN_DEBUG "\ttotal_dirty %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_dirty));
		printk(KERN_DEBUG "\ttotal_used %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_used));
		printk(KERN_DEBUG "\ttotal_dead %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_dead));
		printk(KERN_DEBUG "\ttotal_dark %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_dark));
		break;
	}
	case UBIFS_REF_NODE:
	{
		const struct ubifs_ref_node *ref = node;

		printk(KERN_DEBUG "\tlnum %u\n",
		       le32_to_cpu(ref->lnum));
		printk(KERN_DEBUG "\toffs %u\n",
		       le32_to_cpu(ref->offs));
		printk(KERN_DEBUG "\tjhead %u\n",
		       le32_to_cpu(ref->jhead));
		break;
	}
	case UBIFS_INO_NODE:
	{
		const struct ubifs_ino_node *ino = node;

		key_read(c, &ino->key, &key);
		printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
		printk(KERN_DEBUG "\tcreat_sqnum %llu\n",
		       (unsigned long long)le64_to_cpu(ino->creat_sqnum));
		printk(KERN_DEBUG "\tsize %llu\n",
		       (unsigned long long)le64_to_cpu(ino->size));
		printk(KERN_DEBUG "\tnlink %u\n",
		       le32_to_cpu(ino->nlink));
		printk(KERN_DEBUG "\tatime %lld.%u\n",
		       (long long)le64_to_cpu(ino->atime_sec),
		       le32_to_cpu(ino->atime_nsec));
		printk(KERN_DEBUG "\tmtime %lld.%u\n",
		       (long long)le64_to_cpu(ino->mtime_sec),
		       le32_to_cpu(ino->mtime_nsec));
		printk(KERN_DEBUG "\tctime %lld.%u\n",
		       (long long)le64_to_cpu(ino->ctime_sec),
		       le32_to_cpu(ino->ctime_nsec));
		printk(KERN_DEBUG "\tuid %u\n",
		       le32_to_cpu(ino->uid));
		printk(KERN_DEBUG "\tgid %u\n",
		       le32_to_cpu(ino->gid));
		printk(KERN_DEBUG "\tmode %u\n",
		       le32_to_cpu(ino->mode));
		printk(KERN_DEBUG "\tflags %#x\n",
		       le32_to_cpu(ino->flags));
		printk(KERN_DEBUG "\txattr_cnt %u\n",
		       le32_to_cpu(ino->xattr_cnt));
		printk(KERN_DEBUG "\txattr_size %u\n",
		       le32_to_cpu(ino->xattr_size));
		printk(KERN_DEBUG "\txattr_names %u\n",
		       le32_to_cpu(ino->xattr_names));
		printk(KERN_DEBUG "\tcompr_type %#x\n",
		       (int)le16_to_cpu(ino->compr_type));
		printk(KERN_DEBUG "\tdata len %u\n",
		       le32_to_cpu(ino->data_len));
		break;
	}
	case UBIFS_DENT_NODE:
	case UBIFS_XENT_NODE:
	{
		const struct ubifs_dent_node *dent = node;
		int nlen = le16_to_cpu(dent->nlen);

		key_read(c, &dent->key, &key);
		printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
		printk(KERN_DEBUG "\tinum %llu\n",
		       (unsigned long long)le64_to_cpu(dent->inum));
		printk(KERN_DEBUG "\ttype %d\n", (int)dent->type);
		printk(KERN_DEBUG "\tnlen %d\n", nlen);
		printk(KERN_DEBUG "\tname ");
		/* Refuse to print a name longer than the on-flash maximum */
		if (nlen > UBIFS_MAX_NLEN)
			printk(KERN_DEBUG "(bad name length, not printing, "
			       "bad or corrupted node)");
		else {
			for (i = 0; i < nlen && dent->name[i]; i++)
				printk("%c", dent->name[i]);
		}
		printk("\n");
		break;
	}
	case UBIFS_DATA_NODE:
	{
		const struct ubifs_data_node *dn = node;
		/* Payload length = total node length minus the fixed header */
		int dlen = le32_to_cpu(ch->len) - UBIFS_DATA_NODE_SZ;

		key_read(c, &dn->key, &key);
		printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
		printk(KERN_DEBUG "\tsize %u\n",
		       le32_to_cpu(dn->size));
		printk(KERN_DEBUG "\tcompr_typ %d\n",
		       (int)le16_to_cpu(dn->compr_type));
		printk(KERN_DEBUG "\tdata size %d\n",
		       dlen);
		printk(KERN_DEBUG "\tdata:\n");
		print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET, 32, 1,
			       (void *)&dn->data, dlen, 0);
		break;
	}
	case UBIFS_TRUN_NODE:
	{
		const struct ubifs_trun_node *trun = node;

		printk(KERN_DEBUG "\tinum %u\n",
		       le32_to_cpu(trun->inum));
		printk(KERN_DEBUG "\told_size %llu\n",
		       (unsigned long long)le64_to_cpu(trun->old_size));
		printk(KERN_DEBUG "\tnew_size %llu\n",
		       (unsigned long long)le64_to_cpu(trun->new_size));
		break;
	}
	case UBIFS_IDX_NODE:
	{
		const struct ubifs_idx_node *idx = node;

		n = le16_to_cpu(idx->child_cnt);
		printk(KERN_DEBUG "\tchild_cnt %d\n", n);
		printk(KERN_DEBUG "\tlevel %d\n",
		       (int)le16_to_cpu(idx->level));
		printk(KERN_DEBUG "\tBranches:\n");
		/* Cap at fanout - 1 in case child_cnt is corrupted */
		for (i = 0; i < n && i < c->fanout - 1; i++) {
			const struct ubifs_branch *br;

			br = ubifs_idx_branch(c, idx, i);
			key_read(c, &br->key, &key);
			printk(KERN_DEBUG "\t%d: LEB %d:%d len %d key %s\n",
			       i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs),
			       le32_to_cpu(br->len), DBGKEY(&key));
		}
		break;
	}
	case UBIFS_CS_NODE:
		/* Commit start nodes have no payload beyond the header */
		break;
	case UBIFS_ORPH_NODE:
	{
		const struct ubifs_orph_node *orph = node;

		/* Top bit of cmt_no is the "last node of commit" flag */
		printk(KERN_DEBUG "\tcommit number %llu\n",
		       (unsigned long long)
		       le64_to_cpu(orph->cmt_no) & LLONG_MAX);
		printk(KERN_DEBUG "\tlast node flag %llu\n",
		       (unsigned long long)(le64_to_cpu(orph->cmt_no)) >> 63);
		/* Remaining payload is an array of 8-byte inode numbers */
		n = (le32_to_cpu(ch->len) - UBIFS_ORPH_NODE_SZ) >> 3;
		printk(KERN_DEBUG "\t%d orphan inode numbers:\n", n);
		for (i = 0; i < n; i++)
			printk(KERN_DEBUG "\t  ino %llu\n",
			       (unsigned long long)le64_to_cpu(orph->inos[i]));
		break;
	}
	default:
		printk(KERN_DEBUG "node type %d was not recognized\n",
		       (int)ch->node_type);
	}
	spin_unlock(&dbg_lock);
}
/**
 * dbg_dump_budget_req - dump a budgeting request.
 * @req: the budgeting request to dump
 *
 * Prints all fields of the request under dbg_lock so that concurrent dumps
 * do not interleave.
 */
void dbg_dump_budget_req(const struct ubifs_budget_req *req)
{
	spin_lock(&dbg_lock);
	printk(KERN_DEBUG "Budgeting request: new_ino %d, dirtied_ino %d\n",
	       req->new_ino, req->dirtied_ino);
	printk(KERN_DEBUG "\tnew_ino_d %d, dirtied_ino_d %d\n",
	       req->new_ino_d, req->dirtied_ino_d);
	printk(KERN_DEBUG "\tnew_page %d, dirtied_page %d\n",
	       req->new_page, req->dirtied_page);
	printk(KERN_DEBUG "\tnew_dent %d, mod_dent %d\n",
	       req->new_dent, req->mod_dent);
	printk(KERN_DEBUG "\tidx_growth %d\n", req->idx_growth);
	printk(KERN_DEBUG "\tdata_growth %d dd_growth %d\n",
	       req->data_growth, req->dd_growth);
	spin_unlock(&dbg_lock);
}
/**
 * dbg_dump_lstats - dump LEB properties statistics.
 * @lst: the statistics to dump
 *
 * Printed under dbg_lock to keep the lines of one dump together.
 */
void dbg_dump_lstats(const struct ubifs_lp_stats *lst)
{
	spin_lock(&dbg_lock);
	printk(KERN_DEBUG "(pid %d) Lprops statistics: empty_lebs %d, "
	       "idx_lebs  %d\n", current->pid, lst->empty_lebs, lst->idx_lebs);
	printk(KERN_DEBUG "\ttaken_empty_lebs %d, total_free %lld, "
	       "total_dirty %lld\n", lst->taken_empty_lebs, lst->total_free,
	       lst->total_dirty);
	printk(KERN_DEBUG "\ttotal_used %lld, total_dark %lld, "
	       "total_dead %lld\n", lst->total_used, lst->total_dark,
	       lst->total_dead);
	spin_unlock(&dbg_lock);
}
/**
 * dbg_dump_budg - dump UBIFS budgeting information.
 * @c: UBIFS file-system description object
 *
 * Dumps the budgeting counters, per-journal-head LEBs, the bud tree, old
 * buds, GC'ed index LEBs and the commit state. Runs under dbg_lock;
 * NOTE(review): the bud/old-bud/idx_gc lists appear to rely on the caller
 * or dbg_lock for stability — confirm against the locking rules of
 * c->buds/c->old_buds/c->idx_gc.
 */
void dbg_dump_budg(struct ubifs_info *c)
{
	int i;
	struct rb_node *rb;
	struct ubifs_bud *bud;
	struct ubifs_gced_idx_leb *idx_gc;

	spin_lock(&dbg_lock);
	printk(KERN_DEBUG "(pid %d) Budgeting info: budg_data_growth %lld, "
	       "budg_dd_growth %lld, budg_idx_growth %lld\n", current->pid,
	       c->budg_data_growth, c->budg_dd_growth, c->budg_idx_growth);
	printk(KERN_DEBUG "\tdata budget sum %lld, total budget sum %lld, "
	       "freeable_cnt %d\n", c->budg_data_growth + c->budg_dd_growth,
	       c->budg_data_growth + c->budg_dd_growth + c->budg_idx_growth,
	       c->freeable_cnt);
	printk(KERN_DEBUG "\tmin_idx_lebs %d, old_idx_sz %lld, "
	       "calc_idx_sz %lld, idx_gc_cnt %d\n", c->min_idx_lebs,
	       c->old_idx_sz, c->calc_idx_sz, c->idx_gc_cnt);
	printk(KERN_DEBUG "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, "
	       "clean_zn_cnt %ld\n", atomic_long_read(&c->dirty_pg_cnt),
	       atomic_long_read(&c->dirty_zn_cnt),
	       atomic_long_read(&c->clean_zn_cnt));
	printk(KERN_DEBUG "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
	       c->dark_wm, c->dead_wm, c->max_idx_node_sz);
	printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n",
	       c->gc_lnum, c->ihead_lnum);
	/* One line per journal head: which LEB its write-buffer points at */
	for (i = 0; i < c->jhead_cnt; i++)
		printk(KERN_DEBUG "\tjhead %d\t LEB %d\n",
		       c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum);
	/* Walk the red-black tree of current buds */
	for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
		bud = rb_entry(rb, struct ubifs_bud, rb);
		printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
	}
	list_for_each_entry(bud, &c->old_buds, list)
		printk(KERN_DEBUG "\told bud LEB %d\n", bud->lnum);
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		printk(KERN_DEBUG "\tGC'ed idx LEB %d unmap %d\n",
		       idx_gc->lnum, idx_gc->unmap);
	printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state);
	spin_unlock(&dbg_lock);
}
/**
 * dbg_dump_lprop - dump the properties of one LEB.
 * @c: UBIFS file-system description object
 * @lp: LEB properties to dump
 *
 * The "used" value is derived: leb_size minus free minus dirty.
 */
void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
{
	printk(KERN_DEBUG "LEB %d lprops: free %d, dirty %d (used %d), "
	       "flags %#x\n", lp->lnum, lp->free, lp->dirty,
	       c->leb_size - lp->free - lp->dirty, lp->flags);
}
  592. void dbg_dump_lprops(struct ubifs_info *c)
  593. {
  594. int lnum, err;
  595. struct ubifs_lprops lp;
  596. struct ubifs_lp_stats lst;
  597. printk(KERN_DEBUG "(pid %d) Dumping LEB properties\n", current->pid);
  598. ubifs_get_lp_stats(c, &lst);
  599. dbg_dump_lstats(&lst);
  600. for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
  601. err = ubifs_read_one_lp(c, lnum, &lp);
  602. if (err)
  603. ubifs_err("cannot read lprops for LEB %d", lnum);
  604. dbg_dump_lprop(c, &lp);
  605. }
  606. }
  607. void dbg_dump_leb(const struct ubifs_info *c, int lnum)
  608. {
  609. struct ubifs_scan_leb *sleb;
  610. struct ubifs_scan_node *snod;
  611. if (dbg_failure_mode)
  612. return;
  613. printk(KERN_DEBUG "(pid %d) Dumping LEB %d\n", current->pid, lnum);
  614. sleb = ubifs_scan(c, lnum, 0, c->dbg_buf);
  615. if (IS_ERR(sleb)) {
  616. ubifs_err("scan error %d", (int)PTR_ERR(sleb));
  617. return;
  618. }
  619. printk(KERN_DEBUG "LEB %d has %d nodes ending at %d\n", lnum,
  620. sleb->nodes_cnt, sleb->endpt);
  621. list_for_each_entry(snod, &sleb->nodes, list) {
  622. cond_resched();
  623. printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", lnum,
  624. snod->offs, snod->len);
  625. dbg_dump_node(c, snod->node);
  626. }
  627. ubifs_scan_destroy(sleb);
  628. return;
  629. }
/**
 * dbg_dump_znode - dump a TNC znode and its zbranches.
 * @c: UBIFS file-system description object
 * @znode: znode to dump
 *
 * Prints the znode header information and, when the child count is sane,
 * every zbranch in it. Takes @dbg_lock to serialize with other dumpers
 * (DBGKEY presumably formats into shared state protected by this lock -
 * TODO confirm).
 */
void dbg_dump_znode(const struct ubifs_info *c,
		    const struct ubifs_znode *znode)
{
	int n;
	const struct ubifs_zbranch *zbr;

	spin_lock(&dbg_lock);
	/* The zbranch describing this znode lives in its parent (or zroot) */
	if (znode->parent)
		zbr = &znode->parent->zbranch[znode->iip];
	else
		zbr = &c->zroot;

	printk(KERN_DEBUG "znode %p, LEB %d:%d len %d parent %p iip %d level %d"
	       " child_cnt %d flags %lx\n", znode, zbr->lnum, zbr->offs,
	       zbr->len, znode->parent, znode->iip, znode->level,
	       znode->child_cnt, znode->flags);

	/* Do not walk the zbranch array if the child count looks corrupted */
	if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
		spin_unlock(&dbg_lock);
		return;
	}

	printk(KERN_DEBUG "zbranches:\n");
	for (n = 0; n < znode->child_cnt; n++) {
		zbr = &znode->zbranch[n];
		/* Level > 0: children are znodes; level 0: leaf (LNC) nodes */
		if (znode->level > 0)
			printk(KERN_DEBUG "\t%d: znode %p LEB %d:%d len %d key "
			       "%s\n", n, zbr->znode, zbr->lnum,
			       zbr->offs, zbr->len,
			       DBGKEY(&zbr->key));
		else
			printk(KERN_DEBUG "\t%d: LNC %p LEB %d:%d len %d key "
			       "%s\n", n, zbr->znode, zbr->lnum,
			       zbr->offs, zbr->len,
			       DBGKEY(&zbr->key));
	}
	spin_unlock(&dbg_lock);
}
  664. void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat)
  665. {
  666. int i;
  667. printk(KERN_DEBUG "(pid %d) Dumping heap cat %d (%d elements)\n",
  668. current->pid, cat, heap->cnt);
  669. for (i = 0; i < heap->cnt; i++) {
  670. struct ubifs_lprops *lprops = heap->arr[i];
  671. printk(KERN_DEBUG "\t%d. LEB %d hpos %d free %d dirty %d "
  672. "flags %d\n", i, lprops->lnum, lprops->hpos,
  673. lprops->free, lprops->dirty, lprops->flags);
  674. }
  675. }
  676. void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
  677. struct ubifs_nnode *parent, int iip)
  678. {
  679. int i;
  680. printk(KERN_DEBUG "(pid %d) Dumping pnode:\n", current->pid);
  681. printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n",
  682. (size_t)pnode, (size_t)parent, (size_t)pnode->cnext);
  683. printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n",
  684. pnode->flags, iip, pnode->level, pnode->num);
  685. for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
  686. struct ubifs_lprops *lp = &pnode->lprops[i];
  687. printk(KERN_DEBUG "\t%d: free %d dirty %d flags %d lnum %d\n",
  688. i, lp->free, lp->dirty, lp->flags, lp->lnum);
  689. }
  690. }
  691. void dbg_dump_tnc(struct ubifs_info *c)
  692. {
  693. struct ubifs_znode *znode;
  694. int level;
  695. printk(KERN_DEBUG "\n");
  696. printk(KERN_DEBUG "(pid %d) Dumping the TNC tree\n", current->pid);
  697. znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
  698. level = znode->level;
  699. printk(KERN_DEBUG "== Level %d ==\n", level);
  700. while (znode) {
  701. if (level != znode->level) {
  702. level = znode->level;
  703. printk(KERN_DEBUG "== Level %d ==\n", level);
  704. }
  705. dbg_dump_znode(c, znode);
  706. znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode);
  707. }
  708. printk(KERN_DEBUG "\n");
  709. }
/*
 * dump_znode - 'dbg_walk_index()' callback which dumps one indexing znode.
 * Always returns zero so that the index walk continues.
 */
static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode,
		      void *priv)
{
	dbg_dump_znode(c, znode);
	return 0;
}
/**
 * dbg_dump_index - dump the on-flash index.
 * @c: UBIFS file-system description object
 *
 * This function dumps the whole UBIFS indexing B-tree, unlike 'dbg_dump_tnc()'
 * which dumps only in-memory znodes and does not read znodes from flash.
 */
void dbg_dump_index(struct ubifs_info *c)
{
	dbg_walk_index(c, NULL, dump_znode, NULL);
}
  727. /**
  728. * dbg_check_synced_i_size - check synchronized inode size.
  729. * @inode: inode to check
  730. *
  731. * If inode is clean, synchronized inode size has to be equivalent to current
  732. * inode size. This function has to be called only for locked inodes (@i_mutex
  733. * has to be locked). Returns %0 if synchronized inode size if correct, and
  734. * %-EINVAL if not.
  735. */
  736. int dbg_check_synced_i_size(struct inode *inode)
  737. {
  738. int err = 0;
  739. struct ubifs_inode *ui = ubifs_inode(inode);
  740. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  741. return 0;
  742. if (!S_ISREG(inode->i_mode))
  743. return 0;
  744. mutex_lock(&ui->ui_mutex);
  745. spin_lock(&ui->ui_lock);
  746. if (ui->ui_size != ui->synced_i_size && !ui->dirty) {
  747. ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode "
  748. "is clean", ui->ui_size, ui->synced_i_size);
  749. ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
  750. inode->i_mode, i_size_read(inode));
  751. dbg_dump_stack();
  752. err = -EINVAL;
  753. }
  754. spin_unlock(&ui->ui_lock);
  755. mutex_unlock(&ui->ui_mutex);
  756. return err;
  757. }
/**
 * dbg_check_dir_size - check directory inode size and link count.
 * @c: UBIFS file-system description object
 * @dir: the directory to calculate size for
 *
 * This function makes sure that directory size and link count are correct.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 *
 * Note, it is good idea to make sure the @dir->i_mutex is locked before
 * calling this function.
 */
int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir)
{
	unsigned int nlink = 2; /* every directory has "." and ".." */
	union ubifs_key key;
	struct ubifs_dent_node *dent, *pdent = NULL;
	struct qstr nm = { .name = NULL };
	loff_t size = UBIFS_INO_NODE_SZ;

	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
		return 0;

	if (!S_ISDIR(dir->i_mode))
		return 0;

	/* Iterate over all directory entries of @dir in key order */
	lowest_dent_key(c, &key, dir->i_ino);
	while (1) {
		int err;

		dent = ubifs_tnc_next_ent(c, &key, &nm);
		if (IS_ERR(dent)) {
			err = PTR_ERR(dent);
			/* -ENOENT means no more entries - normal termination */
			if (err == -ENOENT)
				break;
			return err;
		}

		nm.name = dent->name;
		nm.len = le16_to_cpu(dent->nlen);
		size += CALC_DENT_SIZE(nm.len);
		/* Each child directory contributes one ".." back-link */
		if (dent->type == UBIFS_ITYPE_DIR)
			nlink += 1;
		/*
		 * Free the previous entry only now - @nm still points into
		 * the current one, which is needed for the next lookup.
		 */
		kfree(pdent);
		pdent = dent;
		key_read(c, &dent->key, &key);
	}
	kfree(pdent);

	if (i_size_read(dir) != size) {
		ubifs_err("directory inode %lu has size %llu, "
			  "but calculated size is %llu", dir->i_ino,
			  (unsigned long long)i_size_read(dir),
			  (unsigned long long)size);
		dump_stack();
		return -EINVAL;
	}
	if (dir->i_nlink != nlink) {
		ubifs_err("directory inode %lu has nlink %u, but calculated "
			  "nlink is %u", dir->i_ino, dir->i_nlink, nlink);
		dump_stack();
		return -EINVAL;
	}

	return 0;
}
  818. /**
  819. * dbg_check_key_order - make sure that colliding keys are properly ordered.
  820. * @c: UBIFS file-system description object
  821. * @zbr1: first zbranch
  822. * @zbr2: following zbranch
  823. *
  824. * In UBIFS indexing B-tree colliding keys has to be sorted in binary order of
  825. * names of the direntries/xentries which are referred by the keys. This
  826. * function reads direntries/xentries referred by @zbr1 and @zbr2 and makes
  827. * sure the name of direntry/xentry referred by @zbr1 is less than
  828. * direntry/xentry referred by @zbr2. Returns zero if this is true, %1 if not,
  829. * and a negative error code in case of failure.
  830. */
  831. static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
  832. struct ubifs_zbranch *zbr2)
  833. {
  834. int err, nlen1, nlen2, cmp;
  835. struct ubifs_dent_node *dent1, *dent2;
  836. union ubifs_key key;
  837. ubifs_assert(!keys_cmp(c, &zbr1->key, &zbr2->key));
  838. dent1 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
  839. if (!dent1)
  840. return -ENOMEM;
  841. dent2 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
  842. if (!dent2) {
  843. err = -ENOMEM;
  844. goto out_free;
  845. }
  846. err = ubifs_tnc_read_node(c, zbr1, dent1);
  847. if (err)
  848. goto out_free;
  849. err = ubifs_validate_entry(c, dent1);
  850. if (err)
  851. goto out_free;
  852. err = ubifs_tnc_read_node(c, zbr2, dent2);
  853. if (err)
  854. goto out_free;
  855. err = ubifs_validate_entry(c, dent2);
  856. if (err)
  857. goto out_free;
  858. /* Make sure node keys are the same as in zbranch */
  859. err = 1;
  860. key_read(c, &dent1->key, &key);
  861. if (keys_cmp(c, &zbr1->key, &key)) {
  862. dbg_err("1st entry at %d:%d has key %s", zbr1->lnum,
  863. zbr1->offs, DBGKEY(&key));
  864. dbg_err("but it should have key %s according to tnc",
  865. DBGKEY(&zbr1->key));
  866. dbg_dump_node(c, dent1);
  867. goto out_free;
  868. }
  869. key_read(c, &dent2->key, &key);
  870. if (keys_cmp(c, &zbr2->key, &key)) {
  871. dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum,
  872. zbr1->offs, DBGKEY(&key));
  873. dbg_err("but it should have key %s according to tnc",
  874. DBGKEY(&zbr2->key));
  875. dbg_dump_node(c, dent2);
  876. goto out_free;
  877. }
  878. nlen1 = le16_to_cpu(dent1->nlen);
  879. nlen2 = le16_to_cpu(dent2->nlen);
  880. cmp = memcmp(dent1->name, dent2->name, min_t(int, nlen1, nlen2));
  881. if (cmp < 0 || (cmp == 0 && nlen1 < nlen2)) {
  882. err = 0;
  883. goto out_free;
  884. }
  885. if (cmp == 0 && nlen1 == nlen2)
  886. dbg_err("2 xent/dent nodes with the same name");
  887. else
  888. dbg_err("bad order of colliding key %s",
  889. DBGKEY(&key));
  890. dbg_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs);
  891. dbg_dump_node(c, dent1);
  892. dbg_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs);
  893. dbg_dump_node(c, dent2);
  894. out_free:
  895. kfree(dent2);
  896. kfree(dent1);
  897. return err;
  898. }
/**
 * dbg_check_znode - check if znode is all right.
 * @c: UBIFS file-system description object
 * @zbr: zbranch which points to this znode
 *
 * This function makes sure that znode referred to by @zbr is all right.
 * Returns zero if it is, and %-EINVAL if it is not. Internally each numbered
 * 'err' value identifies which sanity check failed and is printed before the
 * dump.
 */
static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr)
{
	struct ubifs_znode *znode = zbr->znode;
	struct ubifs_znode *zp = znode->parent;
	int n, err, cmp;

	/* Basic field sanity: child count, level and parent index in range */
	if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
		err = 1;
		goto out;
	}
	if (znode->level < 0) {
		err = 2;
		goto out;
	}
	if (znode->iip < 0 || znode->iip >= c->fanout) {
		err = 3;
		goto out;
	}

	if (zbr->len == 0)
		/* Only dirty zbranch may have no on-flash nodes */
		if (!ubifs_zn_dirty(znode)) {
			err = 4;
			goto out;
		}

	if (ubifs_zn_dirty(znode)) {
		/*
		 * If znode is dirty, its parent has to be dirty as well. The
		 * order of the operation is important, so we have to have
		 * memory barriers.
		 */
		smp_mb();
		if (zp && !ubifs_zn_dirty(zp)) {
			/*
			 * The dirty flag is atomic and is cleared outside the
			 * TNC mutex, so znode's dirty flag may now have
			 * been cleared. The child is always cleared before the
			 * parent, so we just need to check again.
			 */
			smp_mb();
			if (ubifs_zn_dirty(znode)) {
				err = 5;
				goto out;
			}
		}
	}

	if (zp) {
		const union ubifs_key *min, *max;

		if (znode->level != zp->level - 1) {
			err = 6;
			goto out;
		}

		/* Make sure the 'parent' pointer in our znode is correct */
		err = ubifs_search_zbranch(c, zp, &zbr->key, &n);
		if (!err) {
			/* This zbranch does not exist in the parent */
			err = 7;
			goto out;
		}

		if (znode->iip >= zp->child_cnt) {
			err = 8;
			goto out;
		}

		if (znode->iip != n) {
			/* This may happen only in case of collisions */
			if (keys_cmp(c, &zp->zbranch[n].key,
				     &zp->zbranch[znode->iip].key)) {
				err = 9;
				goto out;
			}
			n = znode->iip;
		}

		/*
		 * Make sure that the first key in our znode is greater than or
		 * equal to the key in the pointing zbranch.
		 */
		min = &zbr->key;
		cmp = keys_cmp(c, min, &znode->zbranch[0].key);
		if (cmp == 1) {
			err = 10;
			goto out;
		}

		if (n + 1 < zp->child_cnt) {
			max = &zp->zbranch[n + 1].key;

			/*
			 * Make sure the last key in our znode is less than or
			 * equivalent to the key in the zbranch which goes
			 * after our pointing zbranch.
			 */
			cmp = keys_cmp(c, max,
				&znode->zbranch[znode->child_cnt - 1].key);
			if (cmp == -1) {
				err = 11;
				goto out;
			}
		}
	} else {
		/* This may only be root znode */
		if (zbr != &c->zroot) {
			err = 12;
			goto out;
		}
	}

	/*
	 * Make sure that next key is greater or equivalent then the previous
	 * one.
	 */
	for (n = 1; n < znode->child_cnt; n++) {
		cmp = keys_cmp(c, &znode->zbranch[n - 1].key,
			       &znode->zbranch[n].key);
		if (cmp > 0) {
			err = 13;
			goto out;
		}
		if (cmp == 0) {
			/* This can only be keys with colliding hash */
			if (!is_hash_key(c, &znode->zbranch[n].key)) {
				err = 14;
				goto out;
			}

			/* Name ordering is only defined at level 0 */
			if (znode->level != 0 || c->replaying)
				continue;

			/*
			 * Colliding keys should follow binary order of
			 * corresponding xentry/dentry names.
			 */
			err = dbg_check_key_order(c, &znode->zbranch[n - 1],
						  &znode->zbranch[n]);
			if (err < 0)
				return err;
			if (err) {
				err = 15;
				goto out;
			}
		}
	}

	/* Every zbranch needs either a cached znode or a valid flash address */
	for (n = 0; n < znode->child_cnt; n++) {
		if (!znode->zbranch[n].znode &&
		    (znode->zbranch[n].lnum == 0 ||
		     znode->zbranch[n].len == 0)) {
			err = 16;
			goto out;
		}

		if (znode->zbranch[n].lnum != 0 &&
		    znode->zbranch[n].len == 0) {
			err = 17;
			goto out;
		}

		if (znode->zbranch[n].lnum == 0 &&
		    znode->zbranch[n].len != 0) {
			err = 18;
			goto out;
		}

		if (znode->zbranch[n].lnum == 0 &&
		    znode->zbranch[n].offs != 0) {
			err = 19;
			goto out;
		}

		if (znode->level != 0 && znode->zbranch[n].znode)
			if (znode->zbranch[n].znode->parent != znode) {
				err = 20;
				goto out;
			}
	}

	return 0;

out:
	ubifs_err("failed, error %d", err);
	ubifs_msg("dump of the znode");
	dbg_dump_znode(c, znode);
	if (zp) {
		ubifs_msg("dump of the parent znode");
		dbg_dump_znode(c, zp);
	}
	dump_stack();
	return -EINVAL;
}
/**
 * dbg_check_tnc - check TNC tree.
 * @c: UBIFS file-system description object
 * @extra: do extra checks that are possible at start commit
 *
 * This function traverses whole TNC tree and checks every znode. Returns zero
 * if everything is all right and %-EINVAL if something is wrong with TNC.
 */
int dbg_check_tnc(struct ubifs_info *c, int extra)
{
	struct ubifs_znode *znode;
	long clean_cnt = 0, dirty_cnt = 0;
	int err, last;

	if (!(ubifs_chk_flags & UBIFS_CHK_TNC))
		return 0;

	ubifs_assert(mutex_is_locked(&c->tnc_mutex));
	if (!c->zroot.znode)
		/* Empty TNC - nothing to check */
		return 0;

	/* Traverse the whole in-memory tree in postorder */
	znode = ubifs_tnc_postorder_first(c->zroot.znode);
	while (1) {
		struct ubifs_znode *prev;
		struct ubifs_zbranch *zbr;

		/* The root znode is described by @c->zroot, not by a parent */
		if (!znode->parent)
			zbr = &c->zroot;
		else
			zbr = &znode->parent->zbranch[znode->iip];

		err = dbg_check_znode(c, zbr);
		if (err)
			return err;

		if (extra) {
			if (ubifs_zn_dirty(znode))
				dirty_cnt += 1;
			else
				clean_cnt += 1;
		}

		prev = znode;
		znode = ubifs_tnc_postorder_next(znode);
		if (!znode)
			break;

		/*
		 * If the last key of this znode is equivalent to the first key
		 * of the next znode (collision), then check order of the keys.
		 */
		last = prev->child_cnt - 1;
		if (prev->level == 0 && znode->level == 0 && !c->replaying &&
		    !keys_cmp(c, &prev->zbranch[last].key,
			      &znode->zbranch[0].key)) {
			err = dbg_check_key_order(c, &prev->zbranch[last],
						  &znode->zbranch[0]);
			if (err < 0)
				return err;
			if (err) {
				ubifs_msg("first znode");
				dbg_dump_znode(c, prev);
				ubifs_msg("second znode");
				dbg_dump_znode(c, znode);
				return -EINVAL;
			}
		}
	}

	/* Cross-check the counted znodes against the global counters */
	if (extra) {
		if (clean_cnt != atomic_long_read(&c->clean_zn_cnt)) {
			ubifs_err("incorrect clean_zn_cnt %ld, calculated %ld",
				  atomic_long_read(&c->clean_zn_cnt),
				  clean_cnt);
			return -EINVAL;
		}
		if (dirty_cnt != atomic_long_read(&c->dirty_zn_cnt)) {
			ubifs_err("incorrect dirty_zn_cnt %ld, calculated %ld",
				  atomic_long_read(&c->dirty_zn_cnt),
				  dirty_cnt);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * dbg_walk_index - walk the on-flash index.
 * @c: UBIFS file-system description object
 * @leaf_cb: called for each leaf node
 * @znode_cb: called for each indexing node
 * @priv: private data which is passed to callbacks
 *
 * This function walks the UBIFS index and calls the @leaf_cb for each leaf
 * node and @znode_cb for each indexing node. Returns zero in case of success
 * and a negative error code in case of failure.
 *
 * It would be better if this function removed every znode it pulled into
 * the TNC, so that the behavior more closely matched the non-debugging
 * behavior.
 */
int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
		   dbg_znode_callback znode_cb, void *priv)
{
	int err;
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *znode, *child;

	mutex_lock(&c->tnc_mutex);
	/* If the root indexing node is not in TNC - pull it */
	if (!c->zroot.znode) {
		c->zroot.znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(c->zroot.znode)) {
			err = PTR_ERR(c->zroot.znode);
			/* Do not leave an ERR_PTR behind in the TNC root */
			c->zroot.znode = NULL;
			goto out_unlock;
		}
	}

	/*
	 * We are going to traverse the indexing tree in the postorder manner.
	 * Go down and find the leftmost indexing node where we are going to
	 * start from.
	 */
	znode = c->zroot.znode;
	while (znode->level > 0) {
		zbr = &znode->zbranch[0];
		child = zbr->znode;
		if (!child) {
			/* Child not cached - load it from the flash media */
			child = ubifs_load_znode(c, zbr, znode, 0);
			if (IS_ERR(child)) {
				err = PTR_ERR(child);
				goto out_unlock;
			}
			zbr->znode = child;
		}

		znode = child;
	}

	/* Iterate over all indexing nodes */
	while (1) {
		int idx;

		cond_resched();

		if (znode_cb) {
			err = znode_cb(c, znode, priv);
			if (err) {
				ubifs_err("znode checking function returned "
					  "error %d", err);
				dbg_dump_znode(c, znode);
				goto out_dump;
			}
		}
		if (leaf_cb && znode->level == 0) {
			/* Level-0 zbranches point at leaf nodes */
			for (idx = 0; idx < znode->child_cnt; idx++) {
				zbr = &znode->zbranch[idx];
				err = leaf_cb(c, zbr, priv);
				if (err) {
					ubifs_err("leaf checking function "
						  "returned error %d, for leaf "
						  "at LEB %d:%d",
						  err, zbr->lnum, zbr->offs);
					goto out_dump;
				}
			}
		}

		if (!znode->parent)
			/* The root was visited last - the walk is complete */
			break;

		idx = znode->iip + 1;
		znode = znode->parent;
		if (idx < znode->child_cnt) {
			/* Switch to the next index in the parent */
			zbr = &znode->zbranch[idx];
			child = zbr->znode;
			if (!child) {
				child = ubifs_load_znode(c, zbr, znode, idx);
				if (IS_ERR(child)) {
					err = PTR_ERR(child);
					goto out_unlock;
				}
				zbr->znode = child;
			}
			znode = child;
		} else
			/*
			 * This is the last child, switch to the parent and
			 * continue.
			 */
			continue;

		/* Go to the lowest leftmost znode in the new sub-tree */
		while (znode->level > 0) {
			zbr = &znode->zbranch[0];
			child = zbr->znode;
			if (!child) {
				child = ubifs_load_znode(c, zbr, znode, 0);
				if (IS_ERR(child)) {
					err = PTR_ERR(child);
					goto out_unlock;
				}
				zbr->znode = child;
			}
			znode = child;
		}
	}

	mutex_unlock(&c->tnc_mutex);
	return 0;

out_dump:
	if (znode->parent)
		zbr = &znode->parent->zbranch[znode->iip];
	else
		zbr = &c->zroot;
	ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
	dbg_dump_znode(c, znode);
out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
  1284. /**
  1285. * add_size - add znode size to partially calculated index size.
  1286. * @c: UBIFS file-system description object
  1287. * @znode: znode to add size for
  1288. * @priv: partially calculated index size
  1289. *
  1290. * This is a helper function for 'dbg_check_idx_size()' which is called for
  1291. * every indexing node and adds its size to the 'long long' variable pointed to
  1292. * by @priv.
  1293. */
  1294. static int add_size(struct ubifs_info *c, struct ubifs_znode *znode, void *priv)
  1295. {
  1296. long long *idx_size = priv;
  1297. int add;
  1298. add = ubifs_idx_node_sz(c, znode->child_cnt);
  1299. add = ALIGN(add, 8);
  1300. *idx_size += add;
  1301. return 0;
  1302. }
  1303. /**
  1304. * dbg_check_idx_size - check index size.
  1305. * @c: UBIFS file-system description object
  1306. * @idx_size: size to check
  1307. *
  1308. * This function walks the UBIFS index, calculates its size and checks that the
  1309. * size is equivalent to @idx_size. Returns zero in case of success and a
  1310. * negative error code in case of failure.
  1311. */
  1312. int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
  1313. {
  1314. int err;
  1315. long long calc = 0;
  1316. if (!(ubifs_chk_flags & UBIFS_CHK_IDX_SZ))
  1317. return 0;
  1318. err = dbg_walk_index(c, NULL, add_size, &calc);
  1319. if (err) {
  1320. ubifs_err("error %d while walking the index", err);
  1321. return err;
  1322. }
  1323. if (calc != idx_size) {
  1324. ubifs_err("index size check failed: calculated size is %lld, "
  1325. "should be %lld", calc, idx_size);
  1326. dump_stack();
  1327. return -EINVAL;
  1328. }
  1329. return 0;
  1330. }
/**
 * struct fsck_inode - information about an inode used when checking the file-system.
 * @rb: link in the RB-tree of inodes
 * @inum: inode number
 * @mode: inode type, permissions, etc
 * @nlink: inode link count
 * @xattr_cnt: count of extended attributes
 * @references: how many directory/xattr entries refer this inode (calculated
 *              while walking the index)
 * @calc_cnt: for directory inode count of child directories
 * @size: inode size (read from on-flash inode)
 * @xattr_sz: summary size of all extended attributes (read from on-flash
 *            inode)
 * @calc_sz: for directories calculated directory size
 * @calc_xcnt: calculated count of extended attributes
 * @calc_xsz: calculated summary size of all extended attributes
 * @xattr_nms: sum of lengths of all extended attribute names belonging to this
 *             inode (read from on-flash inode)
 * @calc_xnms: calculated sum of lengths of all extended attribute names
 */
struct fsck_inode {
	struct rb_node rb;
	ino_t inum;
	umode_t mode;
	unsigned int nlink;
	unsigned int xattr_cnt;
	int references;
	int calc_cnt;
	long long size;
	unsigned int xattr_sz;
	long long calc_sz;
	long long calc_xcnt;
	long long calc_xsz;
	unsigned int xattr_nms;
	long long calc_xnms;
};
/**
 * struct fsck_data - private FS checking information.
 * @inodes: RB-tree of all inodes, indexed by inode number (contains
 *          @struct fsck_inode objects)
 */
struct fsck_data {
	struct rb_root inodes;
};
  1374. /**
  1375. * add_inode - add inode information to RB-tree of inodes.
  1376. * @c: UBIFS file-system description object
  1377. * @fsckd: FS checking information
  1378. * @ino: raw UBIFS inode to add
  1379. *
  1380. * This is a helper function for 'check_leaf()' which adds information about
  1381. * inode @ino to the RB-tree of inodes. Returns inode information pointer in
  1382. * case of success and a negative error code in case of failure.
  1383. */
  1384. static struct fsck_inode *add_inode(struct ubifs_info *c,
  1385. struct fsck_data *fsckd,
  1386. struct ubifs_ino_node *ino)
  1387. {
  1388. struct rb_node **p, *parent = NULL;
  1389. struct fsck_inode *fscki;
  1390. ino_t inum = key_inum_flash(c, &ino->key);
  1391. p = &fsckd->inodes.rb_node;
  1392. while (*p) {
  1393. parent = *p;
  1394. fscki = rb_entry(parent, struct fsck_inode, rb);
  1395. if (inum < fscki->inum)
  1396. p = &(*p)->rb_left;
  1397. else if (inum > fscki->inum)
  1398. p = &(*p)->rb_right;
  1399. else
  1400. return fscki;
  1401. }
  1402. if (inum > c->highest_inum) {
  1403. ubifs_err("too high inode number, max. is %lu",
  1404. c->highest_inum);
  1405. return ERR_PTR(-EINVAL);
  1406. }
  1407. fscki = kzalloc(sizeof(struct fsck_inode), GFP_NOFS);
  1408. if (!fscki)
  1409. return ERR_PTR(-ENOMEM);
  1410. fscki->inum = inum;
  1411. fscki->nlink = le32_to_cpu(ino->nlink);
  1412. fscki->size = le64_to_cpu(ino->size);
  1413. fscki->xattr_cnt = le32_to_cpu(ino->xattr_cnt);
  1414. fscki->xattr_sz = le32_to_cpu(ino->xattr_size);
  1415. fscki->xattr_nms = le32_to_cpu(ino->xattr_names);
  1416. fscki->mode = le32_to_cpu(ino->mode);
  1417. if (S_ISDIR(fscki->mode)) {
  1418. fscki->calc_sz = UBIFS_INO_NODE_SZ;
  1419. fscki->calc_cnt = 2;
  1420. }
  1421. rb_link_node(&fscki->rb, parent, p);
  1422. rb_insert_color(&fscki->rb, &fsckd->inodes);
  1423. return fscki;
  1424. }
  1425. /**
  1426. * search_inode - search inode in the RB-tree of inodes.
  1427. * @fsckd: FS checking information
  1428. * @inum: inode number to search
  1429. *
  1430. * This is a helper function for 'check_leaf()' which searches inode @inum in
  1431. * the RB-tree of inodes and returns an inode information pointer or %NULL if
  1432. * the inode was not found.
  1433. */
  1434. static struct fsck_inode *search_inode(struct fsck_data *fsckd, ino_t inum)
  1435. {
  1436. struct rb_node *p;
  1437. struct fsck_inode *fscki;
  1438. p = fsckd->inodes.rb_node;
  1439. while (p) {
  1440. fscki = rb_entry(p, struct fsck_inode, rb);
  1441. if (inum < fscki->inum)
  1442. p = p->rb_left;
  1443. else if (inum > fscki->inum)
  1444. p = p->rb_right;
  1445. else
  1446. return fscki;
  1447. }
  1448. return NULL;
  1449. }
  1450. /**
  1451. * read_add_inode - read inode node and add it to RB-tree of inodes.
  1452. * @c: UBIFS file-system description object
  1453. * @fsckd: FS checking information
  1454. * @inum: inode number to read
  1455. *
  1456. * This is a helper function for 'check_leaf()' which finds inode node @inum in
  1457. * the index, reads it, and adds it to the RB-tree of inodes. Returns inode
  1458. * information pointer in case of success and a negative error code in case of
  1459. * failure.
  1460. */
  1461. static struct fsck_inode *read_add_inode(struct ubifs_info *c,
  1462. struct fsck_data *fsckd, ino_t inum)
  1463. {
  1464. int n, err;
  1465. union ubifs_key key;
  1466. struct ubifs_znode *znode;
  1467. struct ubifs_zbranch *zbr;
  1468. struct ubifs_ino_node *ino;
  1469. struct fsck_inode *fscki;
  1470. fscki = search_inode(fsckd, inum);
  1471. if (fscki)
  1472. return fscki;
  1473. ino_key_init(c, &key, inum);
  1474. err = ubifs_lookup_level0(c, &key, &znode, &n);
  1475. if (!err) {
  1476. ubifs_err("inode %lu not found in index", inum);
  1477. return ERR_PTR(-ENOENT);
  1478. } else if (err < 0) {
  1479. ubifs_err("error %d while looking up inode %lu", err, inum);
  1480. return ERR_PTR(err);
  1481. }
  1482. zbr = &znode->zbranch[n];
  1483. if (zbr->len < UBIFS_INO_NODE_SZ) {
  1484. ubifs_err("bad node %lu node length %d", inum, zbr->len);
  1485. return ERR_PTR(-EINVAL);
  1486. }
  1487. ino = kmalloc(zbr->len, GFP_NOFS);
  1488. if (!ino)
  1489. return ERR_PTR(-ENOMEM);
  1490. err = ubifs_tnc_read_node(c, zbr, ino);
  1491. if (err) {
  1492. ubifs_err("cannot read inode node at LEB %d:%d, error %d",
  1493. zbr->lnum, zbr->offs, err);
  1494. kfree(ino);
  1495. return ERR_PTR(err);
  1496. }
  1497. fscki = add_inode(c, fsckd, ino);
  1498. kfree(ino);
  1499. if (IS_ERR(fscki)) {
  1500. ubifs_err("error %ld while adding inode %lu node",
  1501. PTR_ERR(fscki), inum);
  1502. return fscki;
  1503. }
  1504. return fscki;
  1505. }
  1506. /**
  1507. * check_leaf - check leaf node.
  1508. * @c: UBIFS file-system description object
  1509. * @zbr: zbranch of the leaf node to check
  1510. * @priv: FS checking information
  1511. *
  1512. * This is a helper function for 'dbg_check_filesystem()' which is called for
  1513. * every single leaf node while walking the indexing tree. It checks that the
  1514. * leaf node referred from the indexing tree exists, has correct CRC, and does
  1515. * some other basic validation. This function is also responsible for building
  1516. * an RB-tree of inodes - it adds all inodes into the RB-tree. It also
  1517. * calculates reference count, size, etc for each inode in order to later
  1518. * compare them to the information stored inside the inodes and detect possible
  1519. * inconsistencies. Returns zero in case of success and a negative error code
  1520. * in case of failure.
  1521. */
  1522. static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
  1523. void *priv)
  1524. {
  1525. ino_t inum;
  1526. void *node;
  1527. struct ubifs_ch *ch;
  1528. int err, type = key_type(c, &zbr->key);
  1529. struct fsck_inode *fscki;
  1530. if (zbr->len < UBIFS_CH_SZ) {
  1531. ubifs_err("bad leaf length %d (LEB %d:%d)",
  1532. zbr->len, zbr->lnum, zbr->offs);
  1533. return -EINVAL;
  1534. }
  1535. node = kmalloc(zbr->len, GFP_NOFS);
  1536. if (!node)
  1537. return -ENOMEM;
  1538. err = ubifs_tnc_read_node(c, zbr, node);
  1539. if (err) {
  1540. ubifs_err("cannot read leaf node at LEB %d:%d, error %d",
  1541. zbr->lnum, zbr->offs, err);
  1542. goto out_free;
  1543. }
  1544. /* If this is an inode node, add it to RB-tree of inodes */
  1545. if (type == UBIFS_INO_KEY) {
  1546. fscki = add_inode(c, priv, node);
  1547. if (IS_ERR(fscki)) {
  1548. err = PTR_ERR(fscki);
  1549. ubifs_err("error %d while adding inode node", err);
  1550. goto out_dump;
  1551. }
  1552. goto out;
  1553. }
  1554. if (type != UBIFS_DENT_KEY && type != UBIFS_XENT_KEY &&
  1555. type != UBIFS_DATA_KEY) {
  1556. ubifs_err("unexpected node type %d at LEB %d:%d",
  1557. type, zbr->lnum, zbr->offs);
  1558. err = -EINVAL;
  1559. goto out_free;
  1560. }
  1561. ch = node;
  1562. if (le64_to_cpu(ch->sqnum) > c->max_sqnum) {
  1563. ubifs_err("too high sequence number, max. is %llu",
  1564. c->max_sqnum);
  1565. err = -EINVAL;
  1566. goto out_dump;
  1567. }
  1568. if (type == UBIFS_DATA_KEY) {
  1569. long long blk_offs;
  1570. struct ubifs_data_node *dn = node;
  1571. /*
  1572. * Search the inode node this data node belongs to and insert
  1573. * it to the RB-tree of inodes.
  1574. */
  1575. inum = key_inum_flash(c, &dn->key);
  1576. fscki = read_add_inode(c, priv, inum);
  1577. if (IS_ERR(fscki)) {
  1578. err = PTR_ERR(fscki);
  1579. ubifs_err("error %d while processing data node and "
  1580. "trying to find inode node %lu", err, inum);
  1581. goto out_dump;
  1582. }
  1583. /* Make sure the data node is within inode size */
  1584. blk_offs = key_block_flash(c, &dn->key);
  1585. blk_offs <<= UBIFS_BLOCK_SHIFT;
  1586. blk_offs += le32_to_cpu(dn->size);
  1587. if (blk_offs > fscki->size) {
  1588. ubifs_err("data node at LEB %d:%d is not within inode "
  1589. "size %lld", zbr->lnum, zbr->offs,
  1590. fscki->size);
  1591. err = -EINVAL;
  1592. goto out_dump;
  1593. }
  1594. } else {
  1595. int nlen;
  1596. struct ubifs_dent_node *dent = node;
  1597. struct fsck_inode *fscki1;
  1598. err = ubifs_validate_entry(c, dent);
  1599. if (err)
  1600. goto out_dump;
  1601. /*
  1602. * Search the inode node this entry refers to and the parent
  1603. * inode node and insert them to the RB-tree of inodes.
  1604. */
  1605. inum = le64_to_cpu(dent->inum);
  1606. fscki = read_add_inode(c, priv, inum);
  1607. if (IS_ERR(fscki)) {
  1608. err = PTR_ERR(fscki);
  1609. ubifs_err("error %d while processing entry node and "
  1610. "trying to find inode node %lu", err, inum);
  1611. goto out_dump;
  1612. }
  1613. /* Count how many direntries or xentries refers this inode */
  1614. fscki->references += 1;
  1615. inum = key_inum_flash(c, &dent->key);
  1616. fscki1 = read_add_inode(c, priv, inum);
  1617. if (IS_ERR(fscki1)) {
  1618. err = PTR_ERR(fscki);
  1619. ubifs_err("error %d while processing entry node and "
  1620. "trying to find parent inode node %lu",
  1621. err, inum);
  1622. goto out_dump;
  1623. }
  1624. nlen = le16_to_cpu(dent->nlen);
  1625. if (type == UBIFS_XENT_KEY) {
  1626. fscki1->calc_xcnt += 1;
  1627. fscki1->calc_xsz += CALC_DENT_SIZE(nlen);
  1628. fscki1->calc_xsz += CALC_XATTR_BYTES(fscki->size);
  1629. fscki1->calc_xnms += nlen;
  1630. } else {
  1631. fscki1->calc_sz += CALC_DENT_SIZE(nlen);
  1632. if (dent->type == UBIFS_ITYPE_DIR)
  1633. fscki1->calc_cnt += 1;
  1634. }
  1635. }
  1636. out:
  1637. kfree(node);
  1638. return 0;
  1639. out_dump:
  1640. ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
  1641. dbg_dump_node(c, node);
  1642. out_free:
  1643. kfree(node);
  1644. return err;
  1645. }
  1646. /**
  1647. * free_inodes - free RB-tree of inodes.
  1648. * @fsckd: FS checking information
  1649. */
  1650. static void free_inodes(struct fsck_data *fsckd)
  1651. {
  1652. struct rb_node *this = fsckd->inodes.rb_node;
  1653. struct fsck_inode *fscki;
  1654. while (this) {
  1655. if (this->rb_left)
  1656. this = this->rb_left;
  1657. else if (this->rb_right)
  1658. this = this->rb_right;
  1659. else {
  1660. fscki = rb_entry(this, struct fsck_inode, rb);
  1661. this = rb_parent(this);
  1662. if (this) {
  1663. if (this->rb_left == &fscki->rb)
  1664. this->rb_left = NULL;
  1665. else
  1666. this->rb_right = NULL;
  1667. }
  1668. kfree(fscki);
  1669. }
  1670. }
  1671. }
  1672. /**
  1673. * check_inodes - checks all inodes.
  1674. * @c: UBIFS file-system description object
  1675. * @fsckd: FS checking information
  1676. *
  1677. * This is a helper function for 'dbg_check_filesystem()' which walks the
  1678. * RB-tree of inodes after the index scan has been finished, and checks that
  1679. * inode nlink, size, etc are correct. Returns zero if inodes are fine,
  1680. * %-EINVAL if not, and a negative error code in case of failure.
  1681. */
static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
{
	int n, err;
	union ubifs_key key;
	struct ubifs_znode *znode;
	struct ubifs_zbranch *zbr;
	struct ubifs_ino_node *ino;
	struct fsck_inode *fscki;
	struct rb_node *this = rb_first(&fsckd->inodes);

	/* Walk every inode collected during the index scan */
	while (this) {
		fscki = rb_entry(this, struct fsck_inode, rb);
		this = rb_next(this);

		if (S_ISDIR(fscki->mode)) {
			/*
			 * Directories have to have exactly one reference (they
			 * cannot have hardlinks), although root inode is an
			 * exception.
			 */
			if (fscki->inum != UBIFS_ROOT_INO &&
			    fscki->references != 1) {
				ubifs_err("directory inode %lu has %d "
					  "direntries which refer it, but "
					  "should be 1", fscki->inum,
					  fscki->references);
				goto out_dump;
			}
			if (fscki->inum == UBIFS_ROOT_INO &&
			    fscki->references != 0) {
				ubifs_err("root inode %lu has non-zero (%d) "
					  "direntries which refer it",
					  fscki->inum, fscki->references);
				goto out_dump;
			}
			/* Stored directory size vs. sum of entry sizes */
			if (fscki->calc_sz != fscki->size) {
				ubifs_err("directory inode %lu size is %lld, "
					  "but calculated size is %lld",
					  fscki->inum, fscki->size,
					  fscki->calc_sz);
				goto out_dump;
			}
			/* Directory nlink vs. counted sub-directories */
			if (fscki->calc_cnt != fscki->nlink) {
				ubifs_err("directory inode %lu nlink is %d, "
					  "but calculated nlink is %d",
					  fscki->inum, fscki->nlink,
					  fscki->calc_cnt);
				goto out_dump;
			}
		} else {
			/* Regular files: nlink must match direntry count */
			if (fscki->references != fscki->nlink) {
				ubifs_err("inode %lu nlink is %d, but "
					  "calculated nlink is %d", fscki->inum,
					  fscki->nlink, fscki->references);
				goto out_dump;
			}
		}
		/* xattr totals recorded in the inode vs. counted values */
		if (fscki->xattr_sz != fscki->calc_xsz) {
			ubifs_err("inode %lu has xattr size %u, but "
				  "calculated size is %lld",
				  fscki->inum, fscki->xattr_sz,
				  fscki->calc_xsz);
			goto out_dump;
		}
		if (fscki->xattr_cnt != fscki->calc_xcnt) {
			ubifs_err("inode %lu has %u xattrs, but "
				  "calculated count is %lld", fscki->inum,
				  fscki->xattr_cnt, fscki->calc_xcnt);
			goto out_dump;
		}
		if (fscki->xattr_nms != fscki->calc_xnms) {
			ubifs_err("inode %lu has xattr names' size %u, but "
				  "calculated names' size is %lld",
				  fscki->inum, fscki->xattr_nms,
				  fscki->calc_xnms);
			goto out_dump;
		}
	}

	return 0;

out_dump:
	/* Read the bad inode and dump it */
	ino_key_init(c, &key, fscki->inum);
	err = ubifs_lookup_level0(c, &key, &znode, &n);
	if (!err) {
		/* Lookup returned "not found" - index is inconsistent */
		ubifs_err("inode %lu not found in index", fscki->inum);
		return -ENOENT;
	} else if (err < 0) {
		ubifs_err("error %d while looking up inode %lu",
			  err, fscki->inum);
		return err;
	}

	zbr = &znode->zbranch[n];
	ino = kmalloc(zbr->len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	err = ubifs_tnc_read_node(c, zbr, ino);
	if (err) {
		ubifs_err("cannot read inode node at LEB %d:%d, error %d",
			  zbr->lnum, zbr->offs, err);
		kfree(ino);
		return err;
	}

	ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
		  fscki->inum, zbr->lnum, zbr->offs);
	dbg_dump_node(c, ino);
	kfree(ino);
	return -EINVAL;
}
  1788. /**
  1789. * dbg_check_filesystem - check the file-system.
  1790. * @c: UBIFS file-system description object
  1791. *
  1792. * This function checks the file system, namely:
  1793. * o makes sure that all leaf nodes exist and their CRCs are correct;
  1794. * o makes sure inode nlink, size, xattr size/count are correct (for all
  1795. * inodes).
  1796. *
  1797. * The function reads whole indexing tree and all nodes, so it is pretty
  1798. * heavy-weight. Returns zero if the file-system is consistent, %-EINVAL if
  1799. * not, and a negative error code in case of failure.
  1800. */
  1801. int dbg_check_filesystem(struct ubifs_info *c)
  1802. {
  1803. int err;
  1804. struct fsck_data fsckd;
  1805. if (!(ubifs_chk_flags & UBIFS_CHK_FS))
  1806. return 0;
  1807. fsckd.inodes = RB_ROOT;
  1808. err = dbg_walk_index(c, check_leaf, NULL, &fsckd);
  1809. if (err)
  1810. goto out_free;
  1811. err = check_inodes(c, &fsckd);
  1812. if (err)
  1813. goto out_free;
  1814. free_inodes(&fsckd);
  1815. return 0;
  1816. out_free:
  1817. ubifs_err("file-system check failed with error %d", err);
  1818. dump_stack();
  1819. free_inodes(&fsckd);
  1820. return err;
  1821. }
  1822. static int invocation_cnt;
  1823. int dbg_force_in_the_gaps(void)
  1824. {
  1825. if (!dbg_force_in_the_gaps_enabled)
  1826. return 0;
  1827. /* Force in-the-gaps every 8th commit */
  1828. return !((invocation_cnt++) & 0x7);
  1829. }
/* Failure mode for recovery testing */

/* True with probability roughly n/d (simple_rand() yields 0..32767) */
#define chance(n, d) (simple_rand() <= (n) * 32768LL / (d))

/* Links one mounted UBIFS instance into the global failure-mode list */
struct failure_mode_info {
	struct list_head list;	/* node in 'fmi_list' */
	struct ubifs_info *c;	/* the file-system this entry describes */
};

/* All registered file-systems, protected by 'fmi_lock' */
static LIST_HEAD(fmi_list);
static DEFINE_SPINLOCK(fmi_lock);

/* State of the pseudo-random generator used by simple_rand() */
static unsigned int next;
  1839. static int simple_rand(void)
  1840. {
  1841. if (next == 0)
  1842. next = current->pid;
  1843. next = next * 1103515245 + 12345;
  1844. return (next >> 16) & 32767;
  1845. }
  1846. void dbg_failure_mode_registration(struct ubifs_info *c)
  1847. {
  1848. struct failure_mode_info *fmi;
  1849. fmi = kmalloc(sizeof(struct failure_mode_info), GFP_NOFS);
  1850. if (!fmi) {
  1851. dbg_err("Failed to register failure mode - no memory");
  1852. return;
  1853. }
  1854. fmi->c = c;
  1855. spin_lock(&fmi_lock);
  1856. list_add_tail(&fmi->list, &fmi_list);
  1857. spin_unlock(&fmi_lock);
  1858. }
  1859. void dbg_failure_mode_deregistration(struct ubifs_info *c)
  1860. {
  1861. struct failure_mode_info *fmi, *tmp;
  1862. spin_lock(&fmi_lock);
  1863. list_for_each_entry_safe(fmi, tmp, &fmi_list, list)
  1864. if (fmi->c == c) {
  1865. list_del(&fmi->list);
  1866. kfree(fmi);
  1867. }
  1868. spin_unlock(&fmi_lock);
  1869. }
  1870. static struct ubifs_info *dbg_find_info(struct ubi_volume_desc *desc)
  1871. {
  1872. struct failure_mode_info *fmi;
  1873. spin_lock(&fmi_lock);
  1874. list_for_each_entry(fmi, &fmi_list, list)
  1875. if (fmi->c->ubi == desc) {
  1876. struct ubifs_info *c = fmi->c;
  1877. spin_unlock(&fmi_lock);
  1878. return c;
  1879. }
  1880. spin_unlock(&fmi_lock);
  1881. return NULL;
  1882. }
  1883. static int in_failure_mode(struct ubi_volume_desc *desc)
  1884. {
  1885. struct ubifs_info *c = dbg_find_info(desc);
  1886. if (c && dbg_failure_mode)
  1887. return c->failure_mode;
  1888. return 0;
  1889. }
/*
 * Decide whether an emulated media failure should happen for an I/O
 * operation on LEB @lnum (@write is non-zero for write-type operations).
 * Returns 1 when the operation must fail - and latches the file-system
 * into permanent failure mode - or 0 when it should proceed.
 */
static int do_fail(struct ubi_volume_desc *desc, int lnum, int write)
{
	struct ubifs_info *c = dbg_find_info(desc);

	/* Nothing to do for unregistered volumes or if testing is off */
	if (!c || !dbg_failure_mode)
		return 0;
	/* Once tripped, every subsequent operation fails */
	if (c->failure_mode)
		return 1;
	if (!c->fail_cnt) {
		/* First call - decide delay to failure */
		if (chance(1, 2)) {
			/* delay is a random power of two, 1..65536 */
			unsigned int delay = 1 << (simple_rand() >> 11);

			if (chance(1, 2)) {
				/* Mode 1: time-based delay */
				c->fail_delay = 1;
				c->fail_timeout = jiffies +
						  msecs_to_jiffies(delay);
				dbg_rcvry("failing after %ums", delay);
			} else {
				/* Mode 2: call-count-based delay */
				c->fail_delay = 2;
				c->fail_cnt_max = delay;
				dbg_rcvry("failing after %u calls", delay);
			}
		}
		c->fail_cnt += 1;
	}
	/* Determine if failure delay has expired */
	if (c->fail_delay == 1) {
		if (time_before(jiffies, c->fail_timeout))
			return 0;
	} else if (c->fail_delay == 2)
		if (c->fail_cnt++ < c->fail_cnt_max)
			return 0;
	/*
	 * The delay has expired. Pick a failure probability depending on
	 * which file-system area @lnum belongs to; critical areas (log,
	 * index head) are made to fail rarely, others more often.
	 */
	if (lnum == UBIFS_SB_LNUM) {
		if (write) {
			if (chance(1, 2))
				return 0;
		} else if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in super block LEB %d", lnum);
	} else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) {
		if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in master LEB %d", lnum);
	} else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) {
		if (write) {
			if (chance(99, 100))
				return 0;
		} else if (chance(399, 400))
			return 0;
		dbg_rcvry("failing in log LEB %d", lnum);
	} else if (lnum >= c->lpt_first && lnum <= c->lpt_last) {
		if (write) {
			if (chance(7, 8))
				return 0;
		} else if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in LPT LEB %d", lnum);
	} else if (lnum >= c->orph_first && lnum <= c->orph_last) {
		if (write) {
			if (chance(1, 2))
				return 0;
		} else if (chance(9, 10))
			return 0;
		dbg_rcvry("failing in orphan LEB %d", lnum);
	} else if (lnum == c->ihead_lnum) {
		if (chance(99, 100))
			return 0;
		dbg_rcvry("failing in index head LEB %d", lnum);
	} else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) {
		if (chance(9, 10))
			return 0;
		dbg_rcvry("failing in GC head LEB %d", lnum);
	} else if (write && !RB_EMPTY_ROOT(&c->buds) &&
		   !ubifs_search_bud(c, lnum)) {
		if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in non-bud LEB %d", lnum);
	} else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND ||
		   c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		if (chance(999, 1000))
			return 0;
		dbg_rcvry("failing in bud LEB %d commit running", lnum);
	} else {
		if (chance(9999, 10000))
			return 0;
		dbg_rcvry("failing in bud LEB %d commit not running", lnum);
	}
	ubifs_err("*** SETTING FAILURE MODE ON (LEB %d) ***", lnum);
	c->failure_mode = 1;
	dump_stack();
	return 1;
}
  1981. static void cut_data(const void *buf, int len)
  1982. {
  1983. int flen, i;
  1984. unsigned char *p = (void *)buf;
  1985. flen = (len * (long long)simple_rand()) >> 15;
  1986. for (i = flen; i < len; i++)
  1987. p[i] = 0xff;
  1988. }
  1989. int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
  1990. int len, int check)
  1991. {
  1992. if (in_failure_mode(desc))
  1993. return -EIO;
  1994. return ubi_leb_read(desc, lnum, buf, offset, len, check);
  1995. }
  1996. int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
  1997. int offset, int len, int dtype)
  1998. {
  1999. int err, failing;
  2000. if (in_failure_mode(desc))
  2001. return -EIO;
  2002. failing = do_fail(desc, lnum, 1);
  2003. if (failing)
  2004. cut_data(buf, len);
  2005. err = ubi_leb_write(desc, lnum, buf, offset, len, dtype);
  2006. if (err)
  2007. return err;
  2008. if (failing)
  2009. return -EIO;
  2010. return 0;
  2011. }
  2012. int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
  2013. int len, int dtype)
  2014. {
  2015. int err;
  2016. if (do_fail(desc, lnum, 1))
  2017. return -EIO;
  2018. err = ubi_leb_change(desc, lnum, buf, len, dtype);
  2019. if (err)
  2020. return err;
  2021. if (do_fail(desc, lnum, 1))
  2022. return -EIO;
  2023. return 0;
  2024. }
  2025. int dbg_leb_erase(struct ubi_volume_desc *desc, int lnum)
  2026. {
  2027. int err;
  2028. if (do_fail(desc, lnum, 0))
  2029. return -EIO;
  2030. err = ubi_leb_erase(desc, lnum);
  2031. if (err)
  2032. return err;
  2033. if (do_fail(desc, lnum, 0))
  2034. return -EIO;
  2035. return 0;
  2036. }
  2037. int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum)
  2038. {
  2039. int err;
  2040. if (do_fail(desc, lnum, 0))
  2041. return -EIO;
  2042. err = ubi_leb_unmap(desc, lnum);
  2043. if (err)
  2044. return err;
  2045. if (do_fail(desc, lnum, 0))
  2046. return -EIO;
  2047. return 0;
  2048. }
  2049. int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum)
  2050. {
  2051. if (in_failure_mode(desc))
  2052. return -EIO;
  2053. return ubi_is_mapped(desc, lnum);
  2054. }
  2055. int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
  2056. {
  2057. int err;
  2058. if (do_fail(desc, lnum, 0))
  2059. return -EIO;
  2060. err = ubi_leb_map(desc, lnum, dtype);
  2061. if (err)
  2062. return err;
  2063. if (do_fail(desc, lnum, 0))
  2064. return -EIO;
  2065. return 0;
  2066. }
  2067. #endif /* CONFIG_UBIFS_FS_DEBUG */