debug.c 64 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347
  1. /*
  2. * This file is part of UBIFS.
  3. *
  4. * Copyright (C) 2006-2008 Nokia Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program; if not, write to the Free Software Foundation, Inc., 51
  17. * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  18. *
  19. * Authors: Artem Bityutskiy (Битюцкий Артём)
  20. * Adrian Hunter
  21. */
  22. /*
  23. * This file implements most of the debugging stuff which is compiled in only
  24. * when it is enabled. But some debugging check functions are implemented in
  25. * corresponding subsystem, just because they are closely related and utilize
  26. * various local functions of those subsystems.
  27. */
  28. #define UBIFS_DBG_PRESERVE_UBI
  29. #include "ubifs.h"
  30. #include <linux/module.h>
  31. #include <linux/moduleparam.h>
  32. #ifdef CONFIG_UBIFS_FS_DEBUG
  33. DEFINE_SPINLOCK(dbg_lock);
  34. static char dbg_key_buf0[128];
  35. static char dbg_key_buf1[128];
  36. unsigned int ubifs_msg_flags = UBIFS_MSG_FLAGS_DEFAULT;
  37. unsigned int ubifs_chk_flags = UBIFS_CHK_FLAGS_DEFAULT;
  38. unsigned int ubifs_tst_flags;
  39. module_param_named(debug_msgs, ubifs_msg_flags, uint, S_IRUGO | S_IWUSR);
  40. module_param_named(debug_chks, ubifs_chk_flags, uint, S_IRUGO | S_IWUSR);
  41. module_param_named(debug_tsts, ubifs_tst_flags, uint, S_IRUGO | S_IWUSR);
  42. MODULE_PARM_DESC(debug_msgs, "Debug message type flags");
  43. MODULE_PARM_DESC(debug_chks, "Debug check flags");
  44. MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
  45. static const char *get_key_fmt(int fmt)
  46. {
  47. switch (fmt) {
  48. case UBIFS_SIMPLE_KEY_FMT:
  49. return "simple";
  50. default:
  51. return "unknown/invalid format";
  52. }
  53. }
  54. static const char *get_key_hash(int hash)
  55. {
  56. switch (hash) {
  57. case UBIFS_KEY_HASH_R5:
  58. return "R5";
  59. case UBIFS_KEY_HASH_TEST:
  60. return "test";
  61. default:
  62. return "unknown/invalid name hash";
  63. }
  64. }
  65. static const char *get_key_type(int type)
  66. {
  67. switch (type) {
  68. case UBIFS_INO_KEY:
  69. return "inode";
  70. case UBIFS_DENT_KEY:
  71. return "direntry";
  72. case UBIFS_XENT_KEY:
  73. return "xentry";
  74. case UBIFS_DATA_KEY:
  75. return "data";
  76. case UBIFS_TRUN_KEY:
  77. return "truncate";
  78. default:
  79. return "unknown/invalid key";
  80. }
  81. }
  82. static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
  83. char *buffer)
  84. {
  85. char *p = buffer;
  86. int type = key_type(c, key);
  87. if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
  88. switch (type) {
  89. case UBIFS_INO_KEY:
  90. sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
  91. get_key_type(type));
  92. break;
  93. case UBIFS_DENT_KEY:
  94. case UBIFS_XENT_KEY:
  95. sprintf(p, "(%lu, %s, %#08x)",
  96. (unsigned long)key_inum(c, key),
  97. get_key_type(type), key_hash(c, key));
  98. break;
  99. case UBIFS_DATA_KEY:
  100. sprintf(p, "(%lu, %s, %u)",
  101. (unsigned long)key_inum(c, key),
  102. get_key_type(type), key_block(c, key));
  103. break;
  104. case UBIFS_TRUN_KEY:
  105. sprintf(p, "(%lu, %s)",
  106. (unsigned long)key_inum(c, key),
  107. get_key_type(type));
  108. break;
  109. default:
  110. sprintf(p, "(bad key type: %#08x, %#08x)",
  111. key->u32[0], key->u32[1]);
  112. }
  113. } else
  114. sprintf(p, "bad key format %d", c->key_fmt);
  115. }
  116. const char *dbg_key_str0(const struct ubifs_info *c, const union ubifs_key *key)
  117. {
  118. /* dbg_lock must be held */
  119. sprintf_key(c, key, dbg_key_buf0);
  120. return dbg_key_buf0;
  121. }
  122. const char *dbg_key_str1(const struct ubifs_info *c, const union ubifs_key *key)
  123. {
  124. /* dbg_lock must be held */
  125. sprintf_key(c, key, dbg_key_buf1);
  126. return dbg_key_buf1;
  127. }
  128. const char *dbg_ntype(int type)
  129. {
  130. switch (type) {
  131. case UBIFS_PAD_NODE:
  132. return "padding node";
  133. case UBIFS_SB_NODE:
  134. return "superblock node";
  135. case UBIFS_MST_NODE:
  136. return "master node";
  137. case UBIFS_REF_NODE:
  138. return "reference node";
  139. case UBIFS_INO_NODE:
  140. return "inode node";
  141. case UBIFS_DENT_NODE:
  142. return "direntry node";
  143. case UBIFS_XENT_NODE:
  144. return "xentry node";
  145. case UBIFS_DATA_NODE:
  146. return "data node";
  147. case UBIFS_TRUN_NODE:
  148. return "truncate node";
  149. case UBIFS_IDX_NODE:
  150. return "indexing node";
  151. case UBIFS_CS_NODE:
  152. return "commit start node";
  153. case UBIFS_ORPH_NODE:
  154. return "orphan node";
  155. default:
  156. return "unknown node";
  157. }
  158. }
  159. static const char *dbg_gtype(int type)
  160. {
  161. switch (type) {
  162. case UBIFS_NO_NODE_GROUP:
  163. return "no node group";
  164. case UBIFS_IN_NODE_GROUP:
  165. return "in node group";
  166. case UBIFS_LAST_OF_NODE_GROUP:
  167. return "last of node group";
  168. default:
  169. return "unknown";
  170. }
  171. }
  172. const char *dbg_cstate(int cmt_state)
  173. {
  174. switch (cmt_state) {
  175. case COMMIT_RESTING:
  176. return "commit resting";
  177. case COMMIT_BACKGROUND:
  178. return "background commit requested";
  179. case COMMIT_REQUIRED:
  180. return "commit required";
  181. case COMMIT_RUNNING_BACKGROUND:
  182. return "BACKGROUND commit running";
  183. case COMMIT_RUNNING_REQUIRED:
  184. return "commit running and required";
  185. case COMMIT_BROKEN:
  186. return "broken commit";
  187. default:
  188. return "unknown commit state";
  189. }
  190. }
  191. static void dump_ch(const struct ubifs_ch *ch)
  192. {
  193. printk(KERN_DEBUG "\tmagic %#x\n", le32_to_cpu(ch->magic));
  194. printk(KERN_DEBUG "\tcrc %#x\n", le32_to_cpu(ch->crc));
  195. printk(KERN_DEBUG "\tnode_type %d (%s)\n", ch->node_type,
  196. dbg_ntype(ch->node_type));
  197. printk(KERN_DEBUG "\tgroup_type %d (%s)\n", ch->group_type,
  198. dbg_gtype(ch->group_type));
  199. printk(KERN_DEBUG "\tsqnum %llu\n",
  200. (unsigned long long)le64_to_cpu(ch->sqnum));
  201. printk(KERN_DEBUG "\tlen %u\n", le32_to_cpu(ch->len));
  202. }
  203. void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode)
  204. {
  205. const struct ubifs_inode *ui = ubifs_inode(inode);
  206. printk(KERN_DEBUG "Dump in-memory inode:");
  207. printk(KERN_DEBUG "\tinode %lu\n", inode->i_ino);
  208. printk(KERN_DEBUG "\tsize %llu\n",
  209. (unsigned long long)i_size_read(inode));
  210. printk(KERN_DEBUG "\tnlink %u\n", inode->i_nlink);
  211. printk(KERN_DEBUG "\tuid %u\n", (unsigned int)inode->i_uid);
  212. printk(KERN_DEBUG "\tgid %u\n", (unsigned int)inode->i_gid);
  213. printk(KERN_DEBUG "\tatime %u.%u\n",
  214. (unsigned int)inode->i_atime.tv_sec,
  215. (unsigned int)inode->i_atime.tv_nsec);
  216. printk(KERN_DEBUG "\tmtime %u.%u\n",
  217. (unsigned int)inode->i_mtime.tv_sec,
  218. (unsigned int)inode->i_mtime.tv_nsec);
  219. printk(KERN_DEBUG "\tctime %u.%u\n",
  220. (unsigned int)inode->i_ctime.tv_sec,
  221. (unsigned int)inode->i_ctime.tv_nsec);
  222. printk(KERN_DEBUG "\tcreat_sqnum %llu\n", ui->creat_sqnum);
  223. printk(KERN_DEBUG "\txattr_size %u\n", ui->xattr_size);
  224. printk(KERN_DEBUG "\txattr_cnt %u\n", ui->xattr_cnt);
  225. printk(KERN_DEBUG "\txattr_names %u\n", ui->xattr_names);
  226. printk(KERN_DEBUG "\tdirty %u\n", ui->dirty);
  227. printk(KERN_DEBUG "\txattr %u\n", ui->xattr);
  228. printk(KERN_DEBUG "\tbulk_read %u\n", ui->xattr);
  229. printk(KERN_DEBUG "\tsynced_i_size %llu\n",
  230. (unsigned long long)ui->synced_i_size);
  231. printk(KERN_DEBUG "\tui_size %llu\n",
  232. (unsigned long long)ui->ui_size);
  233. printk(KERN_DEBUG "\tflags %d\n", ui->flags);
  234. printk(KERN_DEBUG "\tcompr_type %d\n", ui->compr_type);
  235. printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read);
  236. printk(KERN_DEBUG "\tread_in_a_row %lu\n", ui->read_in_a_row);
  237. printk(KERN_DEBUG "\tdata_len %d\n", ui->data_len);
  238. }
/*
 * dbg_dump_node - dump an on-flash UBIFS node in human-readable form.
 * @c: UBIFS file-system description object
 * @node: pointer to the node to dump (interpreted via its common header)
 *
 * Dumps the common header, then decodes the node-type-specific payload.
 * Holds 'dbg_lock' because DBGKEY() formats keys into shared static
 * buffers.
 */
void dbg_dump_node(const struct ubifs_info *c, const void *node)
{
	int i, n;
	union ubifs_key key;
	const struct ubifs_ch *ch = node;

	if (dbg_failure_mode)
		return;

	/* If the magic is incorrect, just hexdump the first bytes */
	if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) {
		printk(KERN_DEBUG "Not a node, first %zu bytes:", UBIFS_CH_SZ);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
			       (void *)node, UBIFS_CH_SZ, 1);
		return;
	}

	spin_lock(&dbg_lock);
	dump_ch(node);

	switch (ch->node_type) {
	case UBIFS_PAD_NODE:
	{
		const struct ubifs_pad_node *pad = node;

		printk(KERN_DEBUG "\tpad_len %u\n",
		       le32_to_cpu(pad->pad_len));
		break;
	}
	case UBIFS_SB_NODE:
	{
		/* Superblock node - static file-system parameters */
		const struct ubifs_sb_node *sup = node;
		unsigned int sup_flags = le32_to_cpu(sup->flags);

		printk(KERN_DEBUG "\tkey_hash %d (%s)\n",
		       (int)sup->key_hash, get_key_hash(sup->key_hash));
		printk(KERN_DEBUG "\tkey_fmt %d (%s)\n",
		       (int)sup->key_fmt, get_key_fmt(sup->key_fmt));
		printk(KERN_DEBUG "\tflags %#x\n", sup_flags);
		printk(KERN_DEBUG "\t big_lpt %u\n",
		       !!(sup_flags & UBIFS_FLG_BIGLPT));
		printk(KERN_DEBUG "\tmin_io_size %u\n",
		       le32_to_cpu(sup->min_io_size));
		printk(KERN_DEBUG "\tleb_size %u\n",
		       le32_to_cpu(sup->leb_size));
		printk(KERN_DEBUG "\tleb_cnt %u\n",
		       le32_to_cpu(sup->leb_cnt));
		printk(KERN_DEBUG "\tmax_leb_cnt %u\n",
		       le32_to_cpu(sup->max_leb_cnt));
		printk(KERN_DEBUG "\tmax_bud_bytes %llu\n",
		       (unsigned long long)le64_to_cpu(sup->max_bud_bytes));
		printk(KERN_DEBUG "\tlog_lebs %u\n",
		       le32_to_cpu(sup->log_lebs));
		printk(KERN_DEBUG "\tlpt_lebs %u\n",
		       le32_to_cpu(sup->lpt_lebs));
		printk(KERN_DEBUG "\torph_lebs %u\n",
		       le32_to_cpu(sup->orph_lebs));
		printk(KERN_DEBUG "\tjhead_cnt %u\n",
		       le32_to_cpu(sup->jhead_cnt));
		printk(KERN_DEBUG "\tfanout %u\n",
		       le32_to_cpu(sup->fanout));
		printk(KERN_DEBUG "\tlsave_cnt %u\n",
		       le32_to_cpu(sup->lsave_cnt));
		printk(KERN_DEBUG "\tdefault_compr %u\n",
		       (int)le16_to_cpu(sup->default_compr));
		printk(KERN_DEBUG "\trp_size %llu\n",
		       (unsigned long long)le64_to_cpu(sup->rp_size));
		printk(KERN_DEBUG "\trp_uid %u\n",
		       le32_to_cpu(sup->rp_uid));
		printk(KERN_DEBUG "\trp_gid %u\n",
		       le32_to_cpu(sup->rp_gid));
		printk(KERN_DEBUG "\tfmt_version %u\n",
		       le32_to_cpu(sup->fmt_version));
		printk(KERN_DEBUG "\ttime_gran %u\n",
		       le32_to_cpu(sup->time_gran));
		printk(KERN_DEBUG "\tUUID %02X%02X%02X%02X-%02X%02X"
		       "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X\n",
		       sup->uuid[0], sup->uuid[1], sup->uuid[2], sup->uuid[3],
		       sup->uuid[4], sup->uuid[5], sup->uuid[6], sup->uuid[7],
		       sup->uuid[8], sup->uuid[9], sup->uuid[10], sup->uuid[11],
		       sup->uuid[12], sup->uuid[13], sup->uuid[14],
		       sup->uuid[15]);
		break;
	}
	case UBIFS_MST_NODE:
	{
		/* Master node - on-flash locations of all major structures */
		const struct ubifs_mst_node *mst = node;

		printk(KERN_DEBUG "\thighest_inum %llu\n",
		       (unsigned long long)le64_to_cpu(mst->highest_inum));
		printk(KERN_DEBUG "\tcommit number %llu\n",
		       (unsigned long long)le64_to_cpu(mst->cmt_no));
		printk(KERN_DEBUG "\tflags %#x\n",
		       le32_to_cpu(mst->flags));
		printk(KERN_DEBUG "\tlog_lnum %u\n",
		       le32_to_cpu(mst->log_lnum));
		printk(KERN_DEBUG "\troot_lnum %u\n",
		       le32_to_cpu(mst->root_lnum));
		printk(KERN_DEBUG "\troot_offs %u\n",
		       le32_to_cpu(mst->root_offs));
		printk(KERN_DEBUG "\troot_len %u\n",
		       le32_to_cpu(mst->root_len));
		printk(KERN_DEBUG "\tgc_lnum %u\n",
		       le32_to_cpu(mst->gc_lnum));
		printk(KERN_DEBUG "\tihead_lnum %u\n",
		       le32_to_cpu(mst->ihead_lnum));
		printk(KERN_DEBUG "\tihead_offs %u\n",
		       le32_to_cpu(mst->ihead_offs));
		printk(KERN_DEBUG "\tindex_size %llu\n",
		       (unsigned long long)le64_to_cpu(mst->index_size));
		printk(KERN_DEBUG "\tlpt_lnum %u\n",
		       le32_to_cpu(mst->lpt_lnum));
		printk(KERN_DEBUG "\tlpt_offs %u\n",
		       le32_to_cpu(mst->lpt_offs));
		printk(KERN_DEBUG "\tnhead_lnum %u\n",
		       le32_to_cpu(mst->nhead_lnum));
		printk(KERN_DEBUG "\tnhead_offs %u\n",
		       le32_to_cpu(mst->nhead_offs));
		printk(KERN_DEBUG "\tltab_lnum %u\n",
		       le32_to_cpu(mst->ltab_lnum));
		printk(KERN_DEBUG "\tltab_offs %u\n",
		       le32_to_cpu(mst->ltab_offs));
		printk(KERN_DEBUG "\tlsave_lnum %u\n",
		       le32_to_cpu(mst->lsave_lnum));
		printk(KERN_DEBUG "\tlsave_offs %u\n",
		       le32_to_cpu(mst->lsave_offs));
		printk(KERN_DEBUG "\tlscan_lnum %u\n",
		       le32_to_cpu(mst->lscan_lnum));
		printk(KERN_DEBUG "\tleb_cnt %u\n",
		       le32_to_cpu(mst->leb_cnt));
		printk(KERN_DEBUG "\tempty_lebs %u\n",
		       le32_to_cpu(mst->empty_lebs));
		printk(KERN_DEBUG "\tidx_lebs %u\n",
		       le32_to_cpu(mst->idx_lebs));
		printk(KERN_DEBUG "\ttotal_free %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_free));
		printk(KERN_DEBUG "\ttotal_dirty %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_dirty));
		printk(KERN_DEBUG "\ttotal_used %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_used));
		printk(KERN_DEBUG "\ttotal_dead %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_dead));
		printk(KERN_DEBUG "\ttotal_dark %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_dark));
		break;
	}
	case UBIFS_REF_NODE:
	{
		/* Log reference node - points at a journal bud LEB */
		const struct ubifs_ref_node *ref = node;

		printk(KERN_DEBUG "\tlnum %u\n",
		       le32_to_cpu(ref->lnum));
		printk(KERN_DEBUG "\toffs %u\n",
		       le32_to_cpu(ref->offs));
		printk(KERN_DEBUG "\tjhead %u\n",
		       le32_to_cpu(ref->jhead));
		break;
	}
	case UBIFS_INO_NODE:
	{
		const struct ubifs_ino_node *ino = node;

		key_read(c, &ino->key, &key);
		printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
		printk(KERN_DEBUG "\tcreat_sqnum %llu\n",
		       (unsigned long long)le64_to_cpu(ino->creat_sqnum));
		printk(KERN_DEBUG "\tsize %llu\n",
		       (unsigned long long)le64_to_cpu(ino->size));
		printk(KERN_DEBUG "\tnlink %u\n",
		       le32_to_cpu(ino->nlink));
		printk(KERN_DEBUG "\tatime %lld.%u\n",
		       (long long)le64_to_cpu(ino->atime_sec),
		       le32_to_cpu(ino->atime_nsec));
		printk(KERN_DEBUG "\tmtime %lld.%u\n",
		       (long long)le64_to_cpu(ino->mtime_sec),
		       le32_to_cpu(ino->mtime_nsec));
		printk(KERN_DEBUG "\tctime %lld.%u\n",
		       (long long)le64_to_cpu(ino->ctime_sec),
		       le32_to_cpu(ino->ctime_nsec));
		printk(KERN_DEBUG "\tuid %u\n",
		       le32_to_cpu(ino->uid));
		printk(KERN_DEBUG "\tgid %u\n",
		       le32_to_cpu(ino->gid));
		printk(KERN_DEBUG "\tmode %u\n",
		       le32_to_cpu(ino->mode));
		printk(KERN_DEBUG "\tflags %#x\n",
		       le32_to_cpu(ino->flags));
		printk(KERN_DEBUG "\txattr_cnt %u\n",
		       le32_to_cpu(ino->xattr_cnt));
		printk(KERN_DEBUG "\txattr_size %u\n",
		       le32_to_cpu(ino->xattr_size));
		printk(KERN_DEBUG "\txattr_names %u\n",
		       le32_to_cpu(ino->xattr_names));
		printk(KERN_DEBUG "\tcompr_type %#x\n",
		       (int)le16_to_cpu(ino->compr_type));
		printk(KERN_DEBUG "\tdata len %u\n",
		       le32_to_cpu(ino->data_len));
		break;
	}
	case UBIFS_DENT_NODE:
	case UBIFS_XENT_NODE:
	{
		/* Directory entries and xattr entries share one layout */
		const struct ubifs_dent_node *dent = node;
		int nlen = le16_to_cpu(dent->nlen);

		key_read(c, &dent->key, &key);
		printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
		printk(KERN_DEBUG "\tinum %llu\n",
		       (unsigned long long)le64_to_cpu(dent->inum));
		printk(KERN_DEBUG "\ttype %d\n", (int)dent->type);
		printk(KERN_DEBUG "\tnlen %d\n", nlen);
		printk(KERN_DEBUG "\tname ");

		if (nlen > UBIFS_MAX_NLEN)
			printk(KERN_DEBUG "(bad name length, not printing, "
					  "bad or corrupted node)");
		else {
			/* Print name bytes as printk continuation */
			for (i = 0; i < nlen && dent->name[i]; i++)
				printk("%c", dent->name[i]);
		}
		printk("\n");

		break;
	}
	case UBIFS_DATA_NODE:
	{
		const struct ubifs_data_node *dn = node;
		/* Payload length is total node length minus the fixed part */
		int dlen = le32_to_cpu(ch->len) - UBIFS_DATA_NODE_SZ;

		key_read(c, &dn->key, &key);
		printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
		printk(KERN_DEBUG "\tsize %u\n",
		       le32_to_cpu(dn->size));
		printk(KERN_DEBUG "\tcompr_typ %d\n",
		       (int)le16_to_cpu(dn->compr_type));
		printk(KERN_DEBUG "\tdata size %d\n",
		       dlen);
		printk(KERN_DEBUG "\tdata:\n");
		print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET, 32, 1,
			       (void *)&dn->data, dlen, 0);
		break;
	}
	case UBIFS_TRUN_NODE:
	{
		const struct ubifs_trun_node *trun = node;

		printk(KERN_DEBUG "\tinum %u\n",
		       le32_to_cpu(trun->inum));
		printk(KERN_DEBUG "\told_size %llu\n",
		       (unsigned long long)le64_to_cpu(trun->old_size));
		printk(KERN_DEBUG "\tnew_size %llu\n",
		       (unsigned long long)le64_to_cpu(trun->new_size));
		break;
	}
	case UBIFS_IDX_NODE:
	{
		const struct ubifs_idx_node *idx = node;

		n = le16_to_cpu(idx->child_cnt);
		printk(KERN_DEBUG "\tchild_cnt %d\n", n);
		printk(KERN_DEBUG "\tlevel %d\n",
		       (int)le16_to_cpu(idx->level));
		printk(KERN_DEBUG "\tBranches:\n");

		/* Cap the dump at fanout - 1 branches */
		for (i = 0; i < n && i < c->fanout - 1; i++) {
			const struct ubifs_branch *br;

			br = ubifs_idx_branch(c, idx, i);
			key_read(c, &br->key, &key);
			printk(KERN_DEBUG "\t%d: LEB %d:%d len %d key %s\n",
			       i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs),
			       le32_to_cpu(br->len), DBGKEY(&key));
		}
		break;
	}
	case UBIFS_CS_NODE:
		/* Commit start node has no payload beyond the header */
		break;
	case UBIFS_ORPH_NODE:
	{
		const struct ubifs_orph_node *orph = node;

		/* Top bit of cmt_no is the "last node" flag */
		printk(KERN_DEBUG "\tcommit number %llu\n",
		       (unsigned long long)
				le64_to_cpu(orph->cmt_no) & LLONG_MAX);
		printk(KERN_DEBUG "\tlast node flag %llu\n",
		       (unsigned long long)(le64_to_cpu(orph->cmt_no)) >> 63);
		n = (le32_to_cpu(ch->len) - UBIFS_ORPH_NODE_SZ) >> 3;
		printk(KERN_DEBUG "\t%d orphan inode numbers:\n", n);
		for (i = 0; i < n; i++)
			printk(KERN_DEBUG "\t  ino %llu\n",
			       (unsigned long long)le64_to_cpu(orph->inos[i]));
		break;
	}
	default:
		printk(KERN_DEBUG "node type %d was not recognized\n",
		       (int)ch->node_type);
	}
	spin_unlock(&dbg_lock);
}
  520. void dbg_dump_budget_req(const struct ubifs_budget_req *req)
  521. {
  522. spin_lock(&dbg_lock);
  523. printk(KERN_DEBUG "Budgeting request: new_ino %d, dirtied_ino %d\n",
  524. req->new_ino, req->dirtied_ino);
  525. printk(KERN_DEBUG "\tnew_ino_d %d, dirtied_ino_d %d\n",
  526. req->new_ino_d, req->dirtied_ino_d);
  527. printk(KERN_DEBUG "\tnew_page %d, dirtied_page %d\n",
  528. req->new_page, req->dirtied_page);
  529. printk(KERN_DEBUG "\tnew_dent %d, mod_dent %d\n",
  530. req->new_dent, req->mod_dent);
  531. printk(KERN_DEBUG "\tidx_growth %d\n", req->idx_growth);
  532. printk(KERN_DEBUG "\tdata_growth %d dd_growth %d\n",
  533. req->data_growth, req->dd_growth);
  534. spin_unlock(&dbg_lock);
  535. }
  536. void dbg_dump_lstats(const struct ubifs_lp_stats *lst)
  537. {
  538. spin_lock(&dbg_lock);
  539. printk(KERN_DEBUG "(pid %d) Lprops statistics: empty_lebs %d, "
  540. "idx_lebs %d\n", current->pid, lst->empty_lebs, lst->idx_lebs);
  541. printk(KERN_DEBUG "\ttaken_empty_lebs %d, total_free %lld, "
  542. "total_dirty %lld\n", lst->taken_empty_lebs, lst->total_free,
  543. lst->total_dirty);
  544. printk(KERN_DEBUG "\ttotal_used %lld, total_dark %lld, "
  545. "total_dead %lld\n", lst->total_used, lst->total_dark,
  546. lst->total_dead);
  547. spin_unlock(&dbg_lock);
  548. }
  549. void dbg_dump_budg(struct ubifs_info *c)
  550. {
  551. int i;
  552. struct rb_node *rb;
  553. struct ubifs_bud *bud;
  554. struct ubifs_gced_idx_leb *idx_gc;
  555. spin_lock(&dbg_lock);
  556. printk(KERN_DEBUG "(pid %d) Budgeting info: budg_data_growth %lld, "
  557. "budg_dd_growth %lld, budg_idx_growth %lld\n", current->pid,
  558. c->budg_data_growth, c->budg_dd_growth, c->budg_idx_growth);
  559. printk(KERN_DEBUG "\tdata budget sum %lld, total budget sum %lld, "
  560. "freeable_cnt %d\n", c->budg_data_growth + c->budg_dd_growth,
  561. c->budg_data_growth + c->budg_dd_growth + c->budg_idx_growth,
  562. c->freeable_cnt);
  563. printk(KERN_DEBUG "\tmin_idx_lebs %d, old_idx_sz %lld, "
  564. "calc_idx_sz %lld, idx_gc_cnt %d\n", c->min_idx_lebs,
  565. c->old_idx_sz, c->calc_idx_sz, c->idx_gc_cnt);
  566. printk(KERN_DEBUG "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, "
  567. "clean_zn_cnt %ld\n", atomic_long_read(&c->dirty_pg_cnt),
  568. atomic_long_read(&c->dirty_zn_cnt),
  569. atomic_long_read(&c->clean_zn_cnt));
  570. printk(KERN_DEBUG "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
  571. c->dark_wm, c->dead_wm, c->max_idx_node_sz);
  572. printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n",
  573. c->gc_lnum, c->ihead_lnum);
  574. for (i = 0; i < c->jhead_cnt; i++)
  575. printk(KERN_DEBUG "\tjhead %d\t LEB %d\n",
  576. c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum);
  577. for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
  578. bud = rb_entry(rb, struct ubifs_bud, rb);
  579. printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
  580. }
  581. list_for_each_entry(bud, &c->old_buds, list)
  582. printk(KERN_DEBUG "\told bud LEB %d\n", bud->lnum);
  583. list_for_each_entry(idx_gc, &c->idx_gc, list)
  584. printk(KERN_DEBUG "\tGC'ed idx LEB %d unmap %d\n",
  585. idx_gc->lnum, idx_gc->unmap);
  586. printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state);
  587. spin_unlock(&dbg_lock);
  588. }
  589. void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
  590. {
  591. printk(KERN_DEBUG "LEB %d lprops: free %d, dirty %d (used %d), "
  592. "flags %#x\n", lp->lnum, lp->free, lp->dirty,
  593. c->leb_size - lp->free - lp->dirty, lp->flags);
  594. }
  595. void dbg_dump_lprops(struct ubifs_info *c)
  596. {
  597. int lnum, err;
  598. struct ubifs_lprops lp;
  599. struct ubifs_lp_stats lst;
  600. printk(KERN_DEBUG "(pid %d) Dumping LEB properties\n", current->pid);
  601. ubifs_get_lp_stats(c, &lst);
  602. dbg_dump_lstats(&lst);
  603. for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
  604. err = ubifs_read_one_lp(c, lnum, &lp);
  605. if (err)
  606. ubifs_err("cannot read lprops for LEB %d", lnum);
  607. dbg_dump_lprop(c, &lp);
  608. }
  609. }
/*
 * dbg_dump_lpt_info - dump LEB Properties Tree information.
 * @c: UBIFS file-system description object
 *
 * Prints the LPT geometry (node sizes, counts, bit widths), the positions
 * of the LPT root, head, ltab and (for big LPT) lsave, then the per-LEB
 * ltab entries. Holds 'dbg_lock' while printing.
 */
void dbg_dump_lpt_info(struct ubifs_info *c)
{
	int i;

	spin_lock(&dbg_lock);
	printk(KERN_DEBUG "\tlpt_sz:        %lld\n", c->lpt_sz);
	printk(KERN_DEBUG "\tpnode_sz:      %d\n", c->pnode_sz);
	printk(KERN_DEBUG "\tnnode_sz:      %d\n", c->nnode_sz);
	printk(KERN_DEBUG "\tltab_sz:       %d\n", c->ltab_sz);
	printk(KERN_DEBUG "\tlsave_sz:      %d\n", c->lsave_sz);
	printk(KERN_DEBUG "\tbig_lpt:       %d\n", c->big_lpt);
	printk(KERN_DEBUG "\tlpt_hght:      %d\n", c->lpt_hght);
	printk(KERN_DEBUG "\tpnode_cnt:     %d\n", c->pnode_cnt);
	printk(KERN_DEBUG "\tnnode_cnt:     %d\n", c->nnode_cnt);
	printk(KERN_DEBUG "\tdirty_pn_cnt:  %d\n", c->dirty_pn_cnt);
	printk(KERN_DEBUG "\tdirty_nn_cnt:  %d\n", c->dirty_nn_cnt);
	printk(KERN_DEBUG "\tlsave_cnt:     %d\n", c->lsave_cnt);
	printk(KERN_DEBUG "\tspace_bits:    %d\n", c->space_bits);
	printk(KERN_DEBUG "\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
	printk(KERN_DEBUG "\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
	printk(KERN_DEBUG "\tlpt_spc_bits:  %d\n", c->lpt_spc_bits);
	printk(KERN_DEBUG "\tpcnt_bits:     %d\n", c->pcnt_bits);
	printk(KERN_DEBUG "\tlnum_bits:     %d\n", c->lnum_bits);
	printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
	printk(KERN_DEBUG "\tLPT head is at %d:%d\n",
	       c->nhead_lnum, c->nhead_offs);
	printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs);
	/* lsave only exists in the big LPT model */
	if (c->big_lpt)
		printk(KERN_DEBUG "\tLPT lsave is at %d:%d\n",
		       c->lsave_lnum, c->lsave_offs);
	for (i = 0; i < c->lpt_lebs; i++)
		printk(KERN_DEBUG "\tLPT LEB %d free %d dirty %d tgc %d "
		       "cmt %d\n", i + c->lpt_first, c->ltab[i].free,
		       c->ltab[i].dirty, c->ltab[i].tgc, c->ltab[i].cmt);
	spin_unlock(&dbg_lock);
}
  645. void dbg_dump_leb(const struct ubifs_info *c, int lnum)
  646. {
  647. struct ubifs_scan_leb *sleb;
  648. struct ubifs_scan_node *snod;
  649. if (dbg_failure_mode)
  650. return;
  651. printk(KERN_DEBUG "(pid %d) Dumping LEB %d\n", current->pid, lnum);
  652. sleb = ubifs_scan(c, lnum, 0, c->dbg_buf);
  653. if (IS_ERR(sleb)) {
  654. ubifs_err("scan error %d", (int)PTR_ERR(sleb));
  655. return;
  656. }
  657. printk(KERN_DEBUG "LEB %d has %d nodes ending at %d\n", lnum,
  658. sleb->nodes_cnt, sleb->endpt);
  659. list_for_each_entry(snod, &sleb->nodes, list) {
  660. cond_resched();
  661. printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", lnum,
  662. snod->offs, snod->len);
  663. dbg_dump_node(c, snod->node);
  664. }
  665. ubifs_scan_destroy(sleb);
  666. return;
  667. }
/**
 * dbg_dump_znode - dump a single in-memory znode.
 * @c: UBIFS file-system description object
 * @znode: znode to dump
 *
 * Dumps the znode header and, if the child count looks sane, every zbranch.
 * Holds @dbg_lock so output is not interleaved with other dumps.
 */
void dbg_dump_znode(const struct ubifs_info *c,
		    const struct ubifs_znode *znode)
{
	int n;
	const struct ubifs_zbranch *zbr;

	spin_lock(&dbg_lock);
	/* The root znode has no parent - its zbranch is @c->zroot */
	if (znode->parent)
		zbr = &znode->parent->zbranch[znode->iip];
	else
		zbr = &c->zroot;

	printk(KERN_DEBUG "znode %p, LEB %d:%d len %d parent %p iip %d level %d"
	       " child_cnt %d flags %lx\n", znode, zbr->lnum, zbr->offs,
	       zbr->len, znode->parent, znode->iip, znode->level,
	       znode->child_cnt, znode->flags);

	/* Do not walk the zbranch array if the child count is corrupted */
	if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
		spin_unlock(&dbg_lock);
		return;
	}

	printk(KERN_DEBUG "zbranches:\n");
	for (n = 0; n < znode->child_cnt; n++) {
		zbr = &znode->zbranch[n];
		/* Level 0 zbranches reference leaf nodes (LNC), not znodes */
		if (znode->level > 0)
			printk(KERN_DEBUG "\t%d: znode %p LEB %d:%d len %d key "
			       "%s\n", n, zbr->znode, zbr->lnum,
			       zbr->offs, zbr->len,
			       DBGKEY(&zbr->key));
		else
			printk(KERN_DEBUG "\t%d: LNC %p LEB %d:%d len %d key "
			       "%s\n", n, zbr->znode, zbr->lnum,
			       zbr->offs, zbr->len,
			       DBGKEY(&zbr->key));
	}
	spin_unlock(&dbg_lock);
}
  702. void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat)
  703. {
  704. int i;
  705. printk(KERN_DEBUG "(pid %d) Dumping heap cat %d (%d elements)\n",
  706. current->pid, cat, heap->cnt);
  707. for (i = 0; i < heap->cnt; i++) {
  708. struct ubifs_lprops *lprops = heap->arr[i];
  709. printk(KERN_DEBUG "\t%d. LEB %d hpos %d free %d dirty %d "
  710. "flags %d\n", i, lprops->lnum, lprops->hpos,
  711. lprops->free, lprops->dirty, lprops->flags);
  712. }
  713. }
/**
 * dbg_dump_pnode - dump an LPT pnode.
 * @c: UBIFS file-system description object
 * @pnode: pnode to dump
 * @parent: parent nnode of @pnode (only its address is printed)
 * @iip: index of @pnode in the parent
 */
void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
		    struct ubifs_nnode *parent, int iip)
{
	int i;

	printk(KERN_DEBUG "(pid %d) Dumping pnode:\n", current->pid);
	printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n",
	       (size_t)pnode, (size_t)parent, (size_t)pnode->cnext);
	printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n",
	       pnode->flags, iip, pnode->level, pnode->num);
	/* A pnode carries the lprops of UBIFS_LPT_FANOUT consecutive LEBs */
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		struct ubifs_lprops *lp = &pnode->lprops[i];
		printk(KERN_DEBUG "\t%d: free %d dirty %d flags %d lnum %d\n",
		       i, lp->free, lp->dirty, lp->flags, lp->lnum);
	}
}
  729. void dbg_dump_tnc(struct ubifs_info *c)
  730. {
  731. struct ubifs_znode *znode;
  732. int level;
  733. printk(KERN_DEBUG "\n");
  734. printk(KERN_DEBUG "(pid %d) Dumping the TNC tree\n", current->pid);
  735. znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
  736. level = znode->level;
  737. printk(KERN_DEBUG "== Level %d ==\n", level);
  738. while (znode) {
  739. if (level != znode->level) {
  740. level = znode->level;
  741. printk(KERN_DEBUG "== Level %d ==\n", level);
  742. }
  743. dbg_dump_znode(c, znode);
  744. znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode);
  745. }
  746. printk(KERN_DEBUG "\n");
  747. }
/**
 * dump_znode - 'dbg_walk_index()' callback which dumps one znode.
 * @c: UBIFS file-system description object
 * @znode: znode to dump
 * @priv: unused private data
 *
 * Always returns zero so the index walk continues.
 */
static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode,
		      void *priv)
{
	dbg_dump_znode(c, znode);
	return 0;
}
/**
 * dbg_dump_index - dump the on-flash index.
 * @c: UBIFS file-system description object
 *
 * This function dumps the whole UBIFS indexing B-tree, unlike 'dbg_dump_tnc()'
 * which dumps only in-memory znodes and does not read znodes from flash.
 */
void dbg_dump_index(struct ubifs_info *c)
{
	/* Walk indexing nodes only (no leaf callback), dumping each one */
	dbg_walk_index(c, NULL, dump_znode, NULL);
}
  765. /**
  766. * dbg_check_synced_i_size - check synchronized inode size.
  767. * @inode: inode to check
  768. *
  769. * If inode is clean, synchronized inode size has to be equivalent to current
  770. * inode size. This function has to be called only for locked inodes (@i_mutex
  771. * has to be locked). Returns %0 if synchronized inode size if correct, and
  772. * %-EINVAL if not.
  773. */
  774. int dbg_check_synced_i_size(struct inode *inode)
  775. {
  776. int err = 0;
  777. struct ubifs_inode *ui = ubifs_inode(inode);
  778. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  779. return 0;
  780. if (!S_ISREG(inode->i_mode))
  781. return 0;
  782. mutex_lock(&ui->ui_mutex);
  783. spin_lock(&ui->ui_lock);
  784. if (ui->ui_size != ui->synced_i_size && !ui->dirty) {
  785. ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode "
  786. "is clean", ui->ui_size, ui->synced_i_size);
  787. ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
  788. inode->i_mode, i_size_read(inode));
  789. dbg_dump_stack();
  790. err = -EINVAL;
  791. }
  792. spin_unlock(&ui->ui_lock);
  793. mutex_unlock(&ui->ui_mutex);
  794. return err;
  795. }
/**
 * dbg_check_dir_size - check directory inode size and link count.
 * @c: UBIFS file-system description object
 * @dir: the directory to calculate size for
 *
 * This function makes sure that directory size and link count are correct.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 *
 * Note, it is good idea to make sure the @dir->i_mutex is locked before
 * calling this function.
 */
int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir)
{
	/* Every directory starts with the "." and ".." links */
	unsigned int nlink = 2;
	union ubifs_key key;
	struct ubifs_dent_node *dent, *pdent = NULL;
	struct qstr nm = { .name = NULL };
	/* An empty directory still occupies one inode node on the media */
	loff_t size = UBIFS_INO_NODE_SZ;

	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
		return 0;
	if (!S_ISDIR(dir->i_mode))
		return 0;

	/* Walk all directory entries of @dir in key order */
	lowest_dent_key(c, &key, dir->i_ino);
	while (1) {
		int err;

		dent = ubifs_tnc_next_ent(c, &key, &nm);
		if (IS_ERR(dent)) {
			err = PTR_ERR(dent);
			/* -ENOENT means there are no more entries */
			if (err == -ENOENT)
				break;
			return err;
		}

		nm.name = dent->name;
		nm.len = le16_to_cpu(dent->nlen);
		size += CALC_DENT_SIZE(nm.len);
		/* Each sub-directory contributes a ".." link to @dir */
		if (dent->type == UBIFS_ITYPE_DIR)
			nlink += 1;
		/*
		 * Free the previous entry only now - @nm still referenced
		 * its name during the lookup above.
		 */
		kfree(pdent);
		pdent = dent;
		key_read(c, &dent->key, &key);
	}
	kfree(pdent);

	if (i_size_read(dir) != size) {
		ubifs_err("directory inode %lu has size %llu, "
			  "but calculated size is %llu", dir->i_ino,
			  (unsigned long long)i_size_read(dir),
			  (unsigned long long)size);
		dump_stack();
		return -EINVAL;
	}
	if (dir->i_nlink != nlink) {
		ubifs_err("directory inode %lu has nlink %u, but calculated "
			  "nlink is %u", dir->i_ino, dir->i_nlink, nlink);
		dump_stack();
		return -EINVAL;
	}

	return 0;
}
  856. /**
  857. * dbg_check_key_order - make sure that colliding keys are properly ordered.
  858. * @c: UBIFS file-system description object
  859. * @zbr1: first zbranch
  860. * @zbr2: following zbranch
  861. *
  862. * In UBIFS indexing B-tree colliding keys has to be sorted in binary order of
  863. * names of the direntries/xentries which are referred by the keys. This
  864. * function reads direntries/xentries referred by @zbr1 and @zbr2 and makes
  865. * sure the name of direntry/xentry referred by @zbr1 is less than
  866. * direntry/xentry referred by @zbr2. Returns zero if this is true, %1 if not,
  867. * and a negative error code in case of failure.
  868. */
  869. static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
  870. struct ubifs_zbranch *zbr2)
  871. {
  872. int err, nlen1, nlen2, cmp;
  873. struct ubifs_dent_node *dent1, *dent2;
  874. union ubifs_key key;
  875. ubifs_assert(!keys_cmp(c, &zbr1->key, &zbr2->key));
  876. dent1 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
  877. if (!dent1)
  878. return -ENOMEM;
  879. dent2 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
  880. if (!dent2) {
  881. err = -ENOMEM;
  882. goto out_free;
  883. }
  884. err = ubifs_tnc_read_node(c, zbr1, dent1);
  885. if (err)
  886. goto out_free;
  887. err = ubifs_validate_entry(c, dent1);
  888. if (err)
  889. goto out_free;
  890. err = ubifs_tnc_read_node(c, zbr2, dent2);
  891. if (err)
  892. goto out_free;
  893. err = ubifs_validate_entry(c, dent2);
  894. if (err)
  895. goto out_free;
  896. /* Make sure node keys are the same as in zbranch */
  897. err = 1;
  898. key_read(c, &dent1->key, &key);
  899. if (keys_cmp(c, &zbr1->key, &key)) {
  900. dbg_err("1st entry at %d:%d has key %s", zbr1->lnum,
  901. zbr1->offs, DBGKEY(&key));
  902. dbg_err("but it should have key %s according to tnc",
  903. DBGKEY(&zbr1->key));
  904. dbg_dump_node(c, dent1);
  905. goto out_free;
  906. }
  907. key_read(c, &dent2->key, &key);
  908. if (keys_cmp(c, &zbr2->key, &key)) {
  909. dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum,
  910. zbr1->offs, DBGKEY(&key));
  911. dbg_err("but it should have key %s according to tnc",
  912. DBGKEY(&zbr2->key));
  913. dbg_dump_node(c, dent2);
  914. goto out_free;
  915. }
  916. nlen1 = le16_to_cpu(dent1->nlen);
  917. nlen2 = le16_to_cpu(dent2->nlen);
  918. cmp = memcmp(dent1->name, dent2->name, min_t(int, nlen1, nlen2));
  919. if (cmp < 0 || (cmp == 0 && nlen1 < nlen2)) {
  920. err = 0;
  921. goto out_free;
  922. }
  923. if (cmp == 0 && nlen1 == nlen2)
  924. dbg_err("2 xent/dent nodes with the same name");
  925. else
  926. dbg_err("bad order of colliding key %s",
  927. DBGKEY(&key));
  928. dbg_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs);
  929. dbg_dump_node(c, dent1);
  930. dbg_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs);
  931. dbg_dump_node(c, dent2);
  932. out_free:
  933. kfree(dent2);
  934. kfree(dent1);
  935. return err;
  936. }
/**
 * dbg_check_znode - check if znode is all right.
 * @c: UBIFS file-system description object
 * @zbr: zbranch which points to this znode
 *
 * This function makes sure that znode referred to by @zbr is all right.
 * Returns zero if it is, and %-EINVAL if it is not. On failure an internal
 * error number (1-20, identifying the particular check that failed) is
 * printed together with a dump of the znode and its parent.
 */
static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr)
{
	struct ubifs_znode *znode = zbr->znode;
	struct ubifs_znode *zp = znode->parent;
	int n, err, cmp;

	/* Basic sanity of the header fields */
	if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
		err = 1;
		goto out;
	}
	if (znode->level < 0) {
		err = 2;
		goto out;
	}
	if (znode->iip < 0 || znode->iip >= c->fanout) {
		err = 3;
		goto out;
	}

	if (zbr->len == 0)
		/* Only dirty zbranch may have no on-flash nodes */
		if (!ubifs_zn_dirty(znode)) {
			err = 4;
			goto out;
		}

	if (ubifs_zn_dirty(znode)) {
		/*
		 * If znode is dirty, its parent has to be dirty as well. The
		 * order of the operation is important, so we have to have
		 * memory barriers.
		 */
		smp_mb();
		if (zp && !ubifs_zn_dirty(zp)) {
			/*
			 * The dirty flag is atomic and is cleared outside the
			 * TNC mutex, so znode's dirty flag may now have
			 * been cleared. The child is always cleared before the
			 * parent, so we just need to check again.
			 */
			smp_mb();
			if (ubifs_zn_dirty(znode)) {
				err = 5;
				goto out;
			}
		}
	}

	if (zp) {
		const union ubifs_key *min, *max;

		/* A child must be exactly one level below its parent */
		if (znode->level != zp->level - 1) {
			err = 6;
			goto out;
		}

		/* Make sure the 'parent' pointer in our znode is correct */
		err = ubifs_search_zbranch(c, zp, &zbr->key, &n);
		if (!err) {
			/* This zbranch does not exist in the parent */
			err = 7;
			goto out;
		}

		if (znode->iip >= zp->child_cnt) {
			err = 8;
			goto out;
		}

		if (znode->iip != n) {
			/* This may happen only in case of collisions */
			if (keys_cmp(c, &zp->zbranch[n].key,
				     &zp->zbranch[znode->iip].key)) {
				err = 9;
				goto out;
			}
			n = znode->iip;
		}

		/*
		 * Make sure that the first key in our znode is greater than or
		 * equal to the key in the pointing zbranch.
		 */
		min = &zbr->key;
		cmp = keys_cmp(c, min, &znode->zbranch[0].key);
		if (cmp == 1) {
			err = 10;
			goto out;
		}

		if (n + 1 < zp->child_cnt) {
			max = &zp->zbranch[n + 1].key;

			/*
			 * Make sure the last key in our znode is less or
			 * equivalent than the key in the zbranch which goes
			 * after our pointing zbranch.
			 */
			cmp = keys_cmp(c, max,
				&znode->zbranch[znode->child_cnt - 1].key);
			if (cmp == -1) {
				err = 11;
				goto out;
			}
		}
	} else {
		/* This may only be root znode */
		if (zbr != &c->zroot) {
			err = 12;
			goto out;
		}
	}

	/*
	 * Make sure that next key is greater or equivalent then the previous
	 * one.
	 */
	for (n = 1; n < znode->child_cnt; n++) {
		cmp = keys_cmp(c, &znode->zbranch[n - 1].key,
			       &znode->zbranch[n].key);
		if (cmp > 0) {
			err = 13;
			goto out;
		}
		if (cmp == 0) {
			/* This can only be keys with colliding hash */
			if (!is_hash_key(c, &znode->zbranch[n].key)) {
				err = 14;
				goto out;
			}

			/* Name-order check only makes sense at level 0 */
			if (znode->level != 0 || c->replaying)
				continue;

			/*
			 * Colliding keys should follow binary order of
			 * corresponding xentry/dentry names.
			 */
			err = dbg_check_key_order(c, &znode->zbranch[n - 1],
						  &znode->zbranch[n]);
			if (err < 0)
				return err;
			if (err) {
				err = 15;
				goto out;
			}
		}
	}

	/* Validate per-zbranch LEB/length/offset invariants */
	for (n = 0; n < znode->child_cnt; n++) {
		if (!znode->zbranch[n].znode &&
		    (znode->zbranch[n].lnum == 0 ||
		     znode->zbranch[n].len == 0)) {
			err = 16;
			goto out;
		}

		if (znode->zbranch[n].lnum != 0 &&
		    znode->zbranch[n].len == 0) {
			err = 17;
			goto out;
		}

		if (znode->zbranch[n].lnum == 0 &&
		    znode->zbranch[n].len != 0) {
			err = 18;
			goto out;
		}

		if (znode->zbranch[n].lnum == 0 &&
		    znode->zbranch[n].offs != 0) {
			err = 19;
			goto out;
		}

		/* Cached children must point back at this znode */
		if (znode->level != 0 && znode->zbranch[n].znode)
			if (znode->zbranch[n].znode->parent != znode) {
				err = 20;
				goto out;
			}
	}

	return 0;

out:
	ubifs_err("failed, error %d", err);
	ubifs_msg("dump of the znode");
	dbg_dump_znode(c, znode);
	if (zp) {
		ubifs_msg("dump of the parent znode");
		dbg_dump_znode(c, zp);
	}
	dump_stack();
	return -EINVAL;
}
/**
 * dbg_check_tnc - check TNC tree.
 * @c: UBIFS file-system description object
 * @extra: do extra checks that are possible at start commit
 *
 * This function traverses the whole in-memory TNC tree and checks every
 * znode. When @extra is non-zero it additionally counts clean and dirty
 * znodes and cross-checks the counts against @c->clean_zn_cnt and
 * @c->dirty_zn_cnt. Must be called with @c->tnc_mutex held. Returns zero
 * if everything is all right and %-EINVAL if something is wrong with TNC.
 */
int dbg_check_tnc(struct ubifs_info *c, int extra)
{
	struct ubifs_znode *znode;
	long clean_cnt = 0, dirty_cnt = 0;
	int err, last;

	if (!(ubifs_chk_flags & UBIFS_CHK_TNC))
		return 0;

	ubifs_assert(mutex_is_locked(&c->tnc_mutex));
	/* Nothing to check if no znodes are cached */
	if (!c->zroot.znode)
		return 0;

	znode = ubifs_tnc_postorder_first(c->zroot.znode);
	while (1) {
		struct ubifs_znode *prev;
		struct ubifs_zbranch *zbr;

		/* The root's zbranch lives in @c->zroot, not in a parent */
		if (!znode->parent)
			zbr = &c->zroot;
		else
			zbr = &znode->parent->zbranch[znode->iip];

		err = dbg_check_znode(c, zbr);
		if (err)
			return err;

		if (extra) {
			if (ubifs_zn_dirty(znode))
				dirty_cnt += 1;
			else
				clean_cnt += 1;
		}

		prev = znode;
		znode = ubifs_tnc_postorder_next(znode);
		if (!znode)
			break;

		/*
		 * If the last key of this znode is equivalent to the first key
		 * of the next znode (collision), then check order of the keys.
		 */
		last = prev->child_cnt - 1;
		if (prev->level == 0 && znode->level == 0 && !c->replaying &&
		    !keys_cmp(c, &prev->zbranch[last].key,
			      &znode->zbranch[0].key)) {
			err = dbg_check_key_order(c, &prev->zbranch[last],
						  &znode->zbranch[0]);
			if (err < 0)
				return err;
			if (err) {
				ubifs_msg("first znode");
				dbg_dump_znode(c, prev);
				ubifs_msg("second znode");
				dbg_dump_znode(c, znode);
				return -EINVAL;
			}
		}
	}

	if (extra) {
		/* Compare walked counts with the global counters */
		if (clean_cnt != atomic_long_read(&c->clean_zn_cnt)) {
			ubifs_err("incorrect clean_zn_cnt %ld, calculated %ld",
				  atomic_long_read(&c->clean_zn_cnt),
				  clean_cnt);
			return -EINVAL;
		}
		if (dirty_cnt != atomic_long_read(&c->dirty_zn_cnt)) {
			ubifs_err("incorrect dirty_zn_cnt %ld, calculated %ld",
				  atomic_long_read(&c->dirty_zn_cnt),
				  dirty_cnt);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * dbg_walk_index - walk the on-flash index.
 * @c: UBIFS file-system description object
 * @leaf_cb: called for each leaf node
 * @znode_cb: called for each indexing node
 * @priv: private data which is passed to callbacks
 *
 * This function walks the UBIFS index and calls the @leaf_cb for each leaf
 * node and @znode_cb for each indexing node. Returns zero in case of success
 * and a negative error code in case of failure.
 *
 * The traversal is post-order: znodes missing from the TNC are loaded from
 * flash on demand. It would be better if this function removed every znode
 * it pulled into the TNC, so that the behavior more closely matched the
 * non-debugging behavior.
 */
int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
		   dbg_znode_callback znode_cb, void *priv)
{
	int err;
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *znode, *child;

	mutex_lock(&c->tnc_mutex);
	/* If the root indexing node is not in TNC - pull it */
	if (!c->zroot.znode) {
		c->zroot.znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(c->zroot.znode)) {
			err = PTR_ERR(c->zroot.znode);
			/* Reset so a failed load is not mistaken for a znode */
			c->zroot.znode = NULL;
			goto out_unlock;
		}
	}

	/*
	 * We are going to traverse the indexing tree in the postorder manner.
	 * Go down and find the leftmost indexing node where we are going to
	 * start from.
	 */
	znode = c->zroot.znode;
	while (znode->level > 0) {
		zbr = &znode->zbranch[0];
		child = zbr->znode;
		if (!child) {
			/* Child not cached - read it from the media */
			child = ubifs_load_znode(c, zbr, znode, 0);
			if (IS_ERR(child)) {
				err = PTR_ERR(child);
				goto out_unlock;
			}
			zbr->znode = child;
		}

		znode = child;
	}

	/* Iterate over all indexing nodes */
	while (1) {
		int idx;

		cond_resched();

		if (znode_cb) {
			err = znode_cb(c, znode, priv);
			if (err) {
				ubifs_err("znode checking function returned "
					  "error %d", err);
				dbg_dump_znode(c, znode);
				goto out_dump;
			}
		}
		if (leaf_cb && znode->level == 0) {
			/* Level 0 zbranches reference leaf nodes */
			for (idx = 0; idx < znode->child_cnt; idx++) {
				zbr = &znode->zbranch[idx];
				err = leaf_cb(c, zbr, priv);
				if (err) {
					ubifs_err("leaf checking function "
						  "returned error %d, for leaf "
						  "at LEB %d:%d",
						  err, zbr->lnum, zbr->offs);
					goto out_dump;
				}
			}
		}

		/* The root has been processed - we are done */
		if (!znode->parent)
			break;

		idx = znode->iip + 1;
		znode = znode->parent;
		if (idx < znode->child_cnt) {
			/* Switch to the next index in the parent */
			zbr = &znode->zbranch[idx];
			child = zbr->znode;
			if (!child) {
				child = ubifs_load_znode(c, zbr, znode, idx);
				if (IS_ERR(child)) {
					err = PTR_ERR(child);
					goto out_unlock;
				}
				zbr->znode = child;
			}
			znode = child;
		} else
			/*
			 * This is the last child, switch to the parent and
			 * continue.
			 */
			continue;

		/* Go to the lowest leftmost znode in the new sub-tree */
		while (znode->level > 0) {
			zbr = &znode->zbranch[0];
			child = zbr->znode;
			if (!child) {
				child = ubifs_load_znode(c, zbr, znode, 0);
				if (IS_ERR(child)) {
					err = PTR_ERR(child);
					goto out_unlock;
				}
				zbr->znode = child;
			}
			znode = child;
		}
	}

	mutex_unlock(&c->tnc_mutex);
	return 0;

out_dump:
	if (znode->parent)
		zbr = &znode->parent->zbranch[znode->iip];
	else
		zbr = &c->zroot;
	ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
	dbg_dump_znode(c, znode);
out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
  1322. /**
  1323. * add_size - add znode size to partially calculated index size.
  1324. * @c: UBIFS file-system description object
  1325. * @znode: znode to add size for
  1326. * @priv: partially calculated index size
  1327. *
  1328. * This is a helper function for 'dbg_check_idx_size()' which is called for
  1329. * every indexing node and adds its size to the 'long long' variable pointed to
  1330. * by @priv.
  1331. */
  1332. static int add_size(struct ubifs_info *c, struct ubifs_znode *znode, void *priv)
  1333. {
  1334. long long *idx_size = priv;
  1335. int add;
  1336. add = ubifs_idx_node_sz(c, znode->child_cnt);
  1337. add = ALIGN(add, 8);
  1338. *idx_size += add;
  1339. return 0;
  1340. }
  1341. /**
  1342. * dbg_check_idx_size - check index size.
  1343. * @c: UBIFS file-system description object
  1344. * @idx_size: size to check
  1345. *
  1346. * This function walks the UBIFS index, calculates its size and checks that the
  1347. * size is equivalent to @idx_size. Returns zero in case of success and a
  1348. * negative error code in case of failure.
  1349. */
  1350. int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
  1351. {
  1352. int err;
  1353. long long calc = 0;
  1354. if (!(ubifs_chk_flags & UBIFS_CHK_IDX_SZ))
  1355. return 0;
  1356. err = dbg_walk_index(c, NULL, add_size, &calc);
  1357. if (err) {
  1358. ubifs_err("error %d while walking the index", err);
  1359. return err;
  1360. }
  1361. if (calc != idx_size) {
  1362. ubifs_err("index size check failed: calculated size is %lld, "
  1363. "should be %lld", calc, idx_size);
  1364. dump_stack();
  1365. return -EINVAL;
  1366. }
  1367. return 0;
  1368. }
/**
 * struct fsck_inode - information about an inode used when checking the file-system.
 * @rb: link in the RB-tree of inodes
 * @inum: inode number
 * @mode: inode type, permissions, etc
 * @nlink: inode link count
 * @xattr_cnt: count of extended attributes
 * @references: how many directory/xattr entries refer this inode (calculated
 * while walking the index)
 * @calc_cnt: for directory inode count of child directories
 * @size: inode size (read from on-flash inode)
 * @xattr_sz: summary size of all extended attributes (read from on-flash
 * inode)
 * @calc_sz: for directories calculated directory size
 * @calc_xcnt: calculated count of extended attributes
 * @calc_xsz: calculated summary size of all extended attributes
 * @xattr_nms: sum of lengths of all extended attribute names belonging to this
 * inode (read from on-flash inode)
 * @calc_xnms: calculated sum of lengths of all extended attribute names
 *
 * Fields prefixed "calc_" are accumulated while walking the index and later
 * compared against the values read from the on-flash inode node.
 */
struct fsck_inode {
	struct rb_node rb;
	ino_t inum;
	umode_t mode;
	unsigned int nlink;
	unsigned int xattr_cnt;
	int references;
	int calc_cnt;
	long long size;
	unsigned int xattr_sz;
	long long calc_sz;
	long long calc_xcnt;
	long long calc_xsz;
	unsigned int xattr_nms;
	long long calc_xnms;
};
/**
 * struct fsck_data - private FS checking information.
 * @inodes: RB-tree of all inodes (contains @struct fsck_inode objects),
 * keyed by inode number
 */
struct fsck_data {
	struct rb_root inodes;
};
/**
 * add_inode - add inode information to RB-tree of inodes.
 * @c: UBIFS file-system description object
 * @fsckd: FS checking information
 * @ino: raw UBIFS inode to add
 *
 * This is a helper function for 'check_leaf()' which adds information about
 * inode @ino to the RB-tree of inodes. Returns inode information pointer in
 * case of success and a negative error code in case of failure. If the inode
 * is already present in the tree, the existing entry is returned.
 */
static struct fsck_inode *add_inode(struct ubifs_info *c,
				    struct fsck_data *fsckd,
				    struct ubifs_ino_node *ino)
{
	struct rb_node **p, *parent = NULL;
	struct fsck_inode *fscki;
	ino_t inum = key_inum_flash(c, &ino->key);

	/* Standard RB-tree insertion walk, keyed by inode number */
	p = &fsckd->inodes.rb_node;
	while (*p) {
		parent = *p;
		fscki = rb_entry(parent, struct fsck_inode, rb);
		if (inum < fscki->inum)
			p = &(*p)->rb_left;
		else if (inum > fscki->inum)
			p = &(*p)->rb_right;
		else
			/* Already in the tree - reuse the existing entry */
			return fscki;
	}

	if (inum > c->highest_inum) {
		ubifs_err("too high inode number, max. is %lu",
			  (unsigned long)c->highest_inum);
		return ERR_PTR(-EINVAL);
	}

	fscki = kzalloc(sizeof(struct fsck_inode), GFP_NOFS);
	if (!fscki)
		return ERR_PTR(-ENOMEM);

	/* Copy the on-flash (little-endian) inode fields checked later */
	fscki->inum = inum;
	fscki->nlink = le32_to_cpu(ino->nlink);
	fscki->size = le64_to_cpu(ino->size);
	fscki->xattr_cnt = le32_to_cpu(ino->xattr_cnt);
	fscki->xattr_sz = le32_to_cpu(ino->xattr_size);
	fscki->xattr_nms = le32_to_cpu(ino->xattr_names);
	fscki->mode = le32_to_cpu(ino->mode);
	if (S_ISDIR(fscki->mode)) {
		/* Directories start with the inode node size and 2 links */
		fscki->calc_sz = UBIFS_INO_NODE_SZ;
		fscki->calc_cnt = 2;
	}

	rb_link_node(&fscki->rb, parent, p);
	rb_insert_color(&fscki->rb, &fsckd->inodes);
	return fscki;
}
  1463. /**
  1464. * search_inode - search inode in the RB-tree of inodes.
  1465. * @fsckd: FS checking information
  1466. * @inum: inode number to search
  1467. *
  1468. * This is a helper function for 'check_leaf()' which searches inode @inum in
  1469. * the RB-tree of inodes and returns an inode information pointer or %NULL if
  1470. * the inode was not found.
  1471. */
  1472. static struct fsck_inode *search_inode(struct fsck_data *fsckd, ino_t inum)
  1473. {
  1474. struct rb_node *p;
  1475. struct fsck_inode *fscki;
  1476. p = fsckd->inodes.rb_node;
  1477. while (p) {
  1478. fscki = rb_entry(p, struct fsck_inode, rb);
  1479. if (inum < fscki->inum)
  1480. p = p->rb_left;
  1481. else if (inum > fscki->inum)
  1482. p = p->rb_right;
  1483. else
  1484. return fscki;
  1485. }
  1486. return NULL;
  1487. }
/**
 * read_add_inode - read inode node and add it to RB-tree of inodes.
 * @c: UBIFS file-system description object
 * @fsckd: FS checking information
 * @inum: inode number to read
 *
 * This is a helper function for 'check_leaf()' which finds inode node @inum in
 * the index, reads it, and adds it to the RB-tree of inodes. Returns inode
 * information pointer in case of success and a negative error code in case of
 * failure. If the inode was added earlier, the cached entry is returned
 * without touching the media.
 */
static struct fsck_inode *read_add_inode(struct ubifs_info *c,
					 struct fsck_data *fsckd, ino_t inum)
{
	int n, err;
	union ubifs_key key;
	struct ubifs_znode *znode;
	struct ubifs_zbranch *zbr;
	struct ubifs_ino_node *ino;
	struct fsck_inode *fscki;

	/* Fast path - the inode may already be in the RB-tree */
	fscki = search_inode(fsckd, inum);
	if (fscki)
		return fscki;

	ino_key_init(c, &key, inum);
	err = ubifs_lookup_level0(c, &key, &znode, &n);
	if (!err) {
		/* Zero means "not found" for ubifs_lookup_level0() */
		ubifs_err("inode %lu not found in index", (unsigned long)inum);
		return ERR_PTR(-ENOENT);
	} else if (err < 0) {
		ubifs_err("error %d while looking up inode %lu",
			  err, (unsigned long)inum);
		return ERR_PTR(err);
	}

	zbr = &znode->zbranch[n];
	if (zbr->len < UBIFS_INO_NODE_SZ) {
		ubifs_err("bad node %lu node length %d",
			  (unsigned long)inum, zbr->len);
		return ERR_PTR(-EINVAL);
	}

	/* Read the on-flash inode node into a temporary buffer */
	ino = kmalloc(zbr->len, GFP_NOFS);
	if (!ino)
		return ERR_PTR(-ENOMEM);

	err = ubifs_tnc_read_node(c, zbr, ino);
	if (err) {
		ubifs_err("cannot read inode node at LEB %d:%d, error %d",
			  zbr->lnum, zbr->offs, err);
		kfree(ino);
		return ERR_PTR(err);
	}

	/* add_inode() copies what it needs, so @ino can be freed here */
	fscki = add_inode(c, fsckd, ino);
	kfree(ino);
	if (IS_ERR(fscki)) {
		ubifs_err("error %ld while adding inode %lu node",
			  PTR_ERR(fscki), (unsigned long)inum);
		return fscki;
	}

	return fscki;
}
  1546. /**
  1547. * check_leaf - check leaf node.
  1548. * @c: UBIFS file-system description object
  1549. * @zbr: zbranch of the leaf node to check
  1550. * @priv: FS checking information
  1551. *
  1552. * This is a helper function for 'dbg_check_filesystem()' which is called for
  1553. * every single leaf node while walking the indexing tree. It checks that the
  1554. * leaf node referred from the indexing tree exists, has correct CRC, and does
  1555. * some other basic validation. This function is also responsible for building
  1556. * an RB-tree of inodes - it adds all inodes into the RB-tree. It also
  1557. * calculates reference count, size, etc for each inode in order to later
  1558. * compare them to the information stored inside the inodes and detect possible
  1559. * inconsistencies. Returns zero in case of success and a negative error code
  1560. * in case of failure.
  1561. */
  1562. static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
  1563. void *priv)
  1564. {
  1565. ino_t inum;
  1566. void *node;
  1567. struct ubifs_ch *ch;
  1568. int err, type = key_type(c, &zbr->key);
  1569. struct fsck_inode *fscki;
  1570. if (zbr->len < UBIFS_CH_SZ) {
  1571. ubifs_err("bad leaf length %d (LEB %d:%d)",
  1572. zbr->len, zbr->lnum, zbr->offs);
  1573. return -EINVAL;
  1574. }
  1575. node = kmalloc(zbr->len, GFP_NOFS);
  1576. if (!node)
  1577. return -ENOMEM;
  1578. err = ubifs_tnc_read_node(c, zbr, node);
  1579. if (err) {
  1580. ubifs_err("cannot read leaf node at LEB %d:%d, error %d",
  1581. zbr->lnum, zbr->offs, err);
  1582. goto out_free;
  1583. }
  1584. /* If this is an inode node, add it to RB-tree of inodes */
  1585. if (type == UBIFS_INO_KEY) {
  1586. fscki = add_inode(c, priv, node);
  1587. if (IS_ERR(fscki)) {
  1588. err = PTR_ERR(fscki);
  1589. ubifs_err("error %d while adding inode node", err);
  1590. goto out_dump;
  1591. }
  1592. goto out;
  1593. }
  1594. if (type != UBIFS_DENT_KEY && type != UBIFS_XENT_KEY &&
  1595. type != UBIFS_DATA_KEY) {
  1596. ubifs_err("unexpected node type %d at LEB %d:%d",
  1597. type, zbr->lnum, zbr->offs);
  1598. err = -EINVAL;
  1599. goto out_free;
  1600. }
  1601. ch = node;
  1602. if (le64_to_cpu(ch->sqnum) > c->max_sqnum) {
  1603. ubifs_err("too high sequence number, max. is %llu",
  1604. c->max_sqnum);
  1605. err = -EINVAL;
  1606. goto out_dump;
  1607. }
  1608. if (type == UBIFS_DATA_KEY) {
  1609. long long blk_offs;
  1610. struct ubifs_data_node *dn = node;
  1611. /*
  1612. * Search the inode node this data node belongs to and insert
  1613. * it to the RB-tree of inodes.
  1614. */
  1615. inum = key_inum_flash(c, &dn->key);
  1616. fscki = read_add_inode(c, priv, inum);
  1617. if (IS_ERR(fscki)) {
  1618. err = PTR_ERR(fscki);
  1619. ubifs_err("error %d while processing data node and "
  1620. "trying to find inode node %lu",
  1621. err, (unsigned long)inum);
  1622. goto out_dump;
  1623. }
  1624. /* Make sure the data node is within inode size */
  1625. blk_offs = key_block_flash(c, &dn->key);
  1626. blk_offs <<= UBIFS_BLOCK_SHIFT;
  1627. blk_offs += le32_to_cpu(dn->size);
  1628. if (blk_offs > fscki->size) {
  1629. ubifs_err("data node at LEB %d:%d is not within inode "
  1630. "size %lld", zbr->lnum, zbr->offs,
  1631. fscki->size);
  1632. err = -EINVAL;
  1633. goto out_dump;
  1634. }
  1635. } else {
  1636. int nlen;
  1637. struct ubifs_dent_node *dent = node;
  1638. struct fsck_inode *fscki1;
  1639. err = ubifs_validate_entry(c, dent);
  1640. if (err)
  1641. goto out_dump;
  1642. /*
  1643. * Search the inode node this entry refers to and the parent
  1644. * inode node and insert them to the RB-tree of inodes.
  1645. */
  1646. inum = le64_to_cpu(dent->inum);
  1647. fscki = read_add_inode(c, priv, inum);
  1648. if (IS_ERR(fscki)) {
  1649. err = PTR_ERR(fscki);
  1650. ubifs_err("error %d while processing entry node and "
  1651. "trying to find inode node %lu",
  1652. err, (unsigned long)inum);
  1653. goto out_dump;
  1654. }
  1655. /* Count how many direntries or xentries refers this inode */
  1656. fscki->references += 1;
  1657. inum = key_inum_flash(c, &dent->key);
  1658. fscki1 = read_add_inode(c, priv, inum);
  1659. if (IS_ERR(fscki1)) {
  1660. err = PTR_ERR(fscki);
  1661. ubifs_err("error %d while processing entry node and "
  1662. "trying to find parent inode node %lu",
  1663. err, (unsigned long)inum);
  1664. goto out_dump;
  1665. }
  1666. nlen = le16_to_cpu(dent->nlen);
  1667. if (type == UBIFS_XENT_KEY) {
  1668. fscki1->calc_xcnt += 1;
  1669. fscki1->calc_xsz += CALC_DENT_SIZE(nlen);
  1670. fscki1->calc_xsz += CALC_XATTR_BYTES(fscki->size);
  1671. fscki1->calc_xnms += nlen;
  1672. } else {
  1673. fscki1->calc_sz += CALC_DENT_SIZE(nlen);
  1674. if (dent->type == UBIFS_ITYPE_DIR)
  1675. fscki1->calc_cnt += 1;
  1676. }
  1677. }
  1678. out:
  1679. kfree(node);
  1680. return 0;
  1681. out_dump:
  1682. ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
  1683. dbg_dump_node(c, node);
  1684. out_free:
  1685. kfree(node);
  1686. return err;
  1687. }
  1688. /**
  1689. * free_inodes - free RB-tree of inodes.
  1690. * @fsckd: FS checking information
  1691. */
  1692. static void free_inodes(struct fsck_data *fsckd)
  1693. {
  1694. struct rb_node *this = fsckd->inodes.rb_node;
  1695. struct fsck_inode *fscki;
  1696. while (this) {
  1697. if (this->rb_left)
  1698. this = this->rb_left;
  1699. else if (this->rb_right)
  1700. this = this->rb_right;
  1701. else {
  1702. fscki = rb_entry(this, struct fsck_inode, rb);
  1703. this = rb_parent(this);
  1704. if (this) {
  1705. if (this->rb_left == &fscki->rb)
  1706. this->rb_left = NULL;
  1707. else
  1708. this->rb_right = NULL;
  1709. }
  1710. kfree(fscki);
  1711. }
  1712. }
  1713. }
/**
 * check_inodes - checks all inodes.
 * @c: UBIFS file-system description object
 * @fsckd: FS checking information
 *
 * This is a helper function for 'dbg_check_filesystem()' which walks the
 * RB-tree of inodes after the index scan has been finished, and checks that
 * inode nlink, size, etc are correct. Returns zero if inodes are fine,
 * %-EINVAL if not, and a negative error code in case of failure.
 */
static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
{
	int n, err;
	union ubifs_key key;
	struct ubifs_znode *znode;
	struct ubifs_zbranch *zbr;
	struct ubifs_ino_node *ino;
	struct fsck_inode *fscki;
	struct rb_node *this = rb_first(&fsckd->inodes);

	while (this) {
		fscki = rb_entry(this, struct fsck_inode, rb);
		/* Advance first so 'fscki' is still valid at 'out_dump' */
		this = rb_next(this);

		if (S_ISDIR(fscki->mode)) {
			/*
			 * Directories have to have exactly one reference (they
			 * cannot have hardlinks), although root inode is an
			 * exception.
			 */
			if (fscki->inum != UBIFS_ROOT_INO &&
			    fscki->references != 1) {
				ubifs_err("directory inode %lu has %d "
					  "direntries which refer it, but "
					  "should be 1",
					  (unsigned long)fscki->inum,
					  fscki->references);
				goto out_dump;
			}
			if (fscki->inum == UBIFS_ROOT_INO &&
			    fscki->references != 0) {
				ubifs_err("root inode %lu has non-zero (%d) "
					  "direntries which refer it",
					  (unsigned long)fscki->inum,
					  fscki->references);
				goto out_dump;
			}
			/* Stored size must match sum of direntry sizes */
			if (fscki->calc_sz != fscki->size) {
				ubifs_err("directory inode %lu size is %lld, "
					  "but calculated size is %lld",
					  (unsigned long)fscki->inum,
					  fscki->size, fscki->calc_sz);
				goto out_dump;
			}
			/* Dir nlink is derived from subdirectory count */
			if (fscki->calc_cnt != fscki->nlink) {
				ubifs_err("directory inode %lu nlink is %d, "
					  "but calculated nlink is %d",
					  (unsigned long)fscki->inum,
					  fscki->nlink, fscki->calc_cnt);
				goto out_dump;
			}
		} else {
			/* Non-directory: nlink equals direntry references */
			if (fscki->references != fscki->nlink) {
				ubifs_err("inode %lu nlink is %d, but "
					  "calculated nlink is %d",
					  (unsigned long)fscki->inum,
					  fscki->nlink, fscki->references);
				goto out_dump;
			}
		}
		/* Extended attribute accounting checks (all inode types) */
		if (fscki->xattr_sz != fscki->calc_xsz) {
			ubifs_err("inode %lu has xattr size %u, but "
				  "calculated size is %lld",
				  (unsigned long)fscki->inum, fscki->xattr_sz,
				  fscki->calc_xsz);
			goto out_dump;
		}
		if (fscki->xattr_cnt != fscki->calc_xcnt) {
			ubifs_err("inode %lu has %u xattrs, but "
				  "calculated count is %lld",
				  (unsigned long)fscki->inum,
				  fscki->xattr_cnt, fscki->calc_xcnt);
			goto out_dump;
		}
		if (fscki->xattr_nms != fscki->calc_xnms) {
			ubifs_err("inode %lu has xattr names' size %u, but "
				  "calculated names' size is %lld",
				  (unsigned long)fscki->inum, fscki->xattr_nms,
				  fscki->calc_xnms);
			goto out_dump;
		}
	}

	return 0;

out_dump:
	/* Read the bad inode and dump it */
	ino_key_init(c, &key, fscki->inum);
	err = ubifs_lookup_level0(c, &key, &znode, &n);
	if (!err) {
		ubifs_err("inode %lu not found in index",
			  (unsigned long)fscki->inum);
		return -ENOENT;
	} else if (err < 0) {
		ubifs_err("error %d while looking up inode %lu",
			  err, (unsigned long)fscki->inum);
		return err;
	}

	zbr = &znode->zbranch[n];
	ino = kmalloc(zbr->len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	err = ubifs_tnc_read_node(c, zbr, ino);
	if (err) {
		ubifs_err("cannot read inode node at LEB %d:%d, error %d",
			  zbr->lnum, zbr->offs, err);
		kfree(ino);
		return err;
	}

	ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
		  (unsigned long)fscki->inum, zbr->lnum, zbr->offs);
	dbg_dump_node(c, ino);
	kfree(ino);
	/* An inconsistency was found - report it as -EINVAL */
	return -EINVAL;
}
  1835. /**
  1836. * dbg_check_filesystem - check the file-system.
  1837. * @c: UBIFS file-system description object
  1838. *
  1839. * This function checks the file system, namely:
  1840. * o makes sure that all leaf nodes exist and their CRCs are correct;
  1841. * o makes sure inode nlink, size, xattr size/count are correct (for all
  1842. * inodes).
  1843. *
  1844. * The function reads whole indexing tree and all nodes, so it is pretty
  1845. * heavy-weight. Returns zero if the file-system is consistent, %-EINVAL if
  1846. * not, and a negative error code in case of failure.
  1847. */
  1848. int dbg_check_filesystem(struct ubifs_info *c)
  1849. {
  1850. int err;
  1851. struct fsck_data fsckd;
  1852. if (!(ubifs_chk_flags & UBIFS_CHK_FS))
  1853. return 0;
  1854. fsckd.inodes = RB_ROOT;
  1855. err = dbg_walk_index(c, check_leaf, NULL, &fsckd);
  1856. if (err)
  1857. goto out_free;
  1858. err = check_inodes(c, &fsckd);
  1859. if (err)
  1860. goto out_free;
  1861. free_inodes(&fsckd);
  1862. return 0;
  1863. out_free:
  1864. ubifs_err("file-system check failed with error %d", err);
  1865. dump_stack();
  1866. free_inodes(&fsckd);
  1867. return err;
  1868. }
  1869. static int invocation_cnt;
  1870. int dbg_force_in_the_gaps(void)
  1871. {
  1872. if (!dbg_force_in_the_gaps_enabled)
  1873. return 0;
  1874. /* Force in-the-gaps every 8th commit */
  1875. return !((invocation_cnt++) & 0x7);
  1876. }
/* Failure mode for recovery testing */

/*
 * Evaluate to non-zero with probability roughly n/d. simple_rand() below
 * returns a value in [0, 32767], hence the 32768 scale factor.
 */
#define chance(n, d) (simple_rand() <= (n) * 32768LL / (d))

/* One entry per mounted UBIFS instance participating in failure testing */
struct failure_mode_info {
	struct list_head list;	/* link in 'fmi_list' */
	struct ubifs_info *c;	/* the registered file-system */
};

/* All registered file-systems; protected by 'fmi_lock' */
static LIST_HEAD(fmi_list);
static DEFINE_SPINLOCK(fmi_lock);

/* PRNG state for simple_rand(); zero means "not seeded yet" */
static unsigned int next;
  1886. static int simple_rand(void)
  1887. {
  1888. if (next == 0)
  1889. next = current->pid;
  1890. next = next * 1103515245 + 12345;
  1891. return (next >> 16) & 32767;
  1892. }
  1893. void dbg_failure_mode_registration(struct ubifs_info *c)
  1894. {
  1895. struct failure_mode_info *fmi;
  1896. fmi = kmalloc(sizeof(struct failure_mode_info), GFP_NOFS);
  1897. if (!fmi) {
  1898. dbg_err("Failed to register failure mode - no memory");
  1899. return;
  1900. }
  1901. fmi->c = c;
  1902. spin_lock(&fmi_lock);
  1903. list_add_tail(&fmi->list, &fmi_list);
  1904. spin_unlock(&fmi_lock);
  1905. }
  1906. void dbg_failure_mode_deregistration(struct ubifs_info *c)
  1907. {
  1908. struct failure_mode_info *fmi, *tmp;
  1909. spin_lock(&fmi_lock);
  1910. list_for_each_entry_safe(fmi, tmp, &fmi_list, list)
  1911. if (fmi->c == c) {
  1912. list_del(&fmi->list);
  1913. kfree(fmi);
  1914. }
  1915. spin_unlock(&fmi_lock);
  1916. }
  1917. static struct ubifs_info *dbg_find_info(struct ubi_volume_desc *desc)
  1918. {
  1919. struct failure_mode_info *fmi;
  1920. spin_lock(&fmi_lock);
  1921. list_for_each_entry(fmi, &fmi_list, list)
  1922. if (fmi->c->ubi == desc) {
  1923. struct ubifs_info *c = fmi->c;
  1924. spin_unlock(&fmi_lock);
  1925. return c;
  1926. }
  1927. spin_unlock(&fmi_lock);
  1928. return NULL;
  1929. }
  1930. static int in_failure_mode(struct ubi_volume_desc *desc)
  1931. {
  1932. struct ubifs_info *c = dbg_find_info(desc);
  1933. if (c && dbg_failure_mode)
  1934. return c->failure_mode;
  1935. return 0;
  1936. }
/*
 * Decide whether to inject an I/O failure for LEB @lnum of the volume
 * behind @desc. @write is non-zero for write-type operations. Returns 1 if
 * the operation should fail (and switches the file-system into permanent
 * failure mode), 0 otherwise.
 */
static int do_fail(struct ubi_volume_desc *desc, int lnum, int write)
{
	struct ubifs_info *c = dbg_find_info(desc);

	if (!c || !dbg_failure_mode)
		return 0;
	/* Once failed, always failed */
	if (c->failure_mode)
		return 1;
	if (!c->fail_cnt) {
		/* First call - decide delay to failure */
		if (chance(1, 2)) {
			unsigned int delay = 1 << (simple_rand() >> 11);

			if (chance(1, 2)) {
				/* Time-based delay */
				c->fail_delay = 1;
				c->fail_timeout = jiffies +
						  msecs_to_jiffies(delay);
				dbg_rcvry("failing after %ums", delay);
			} else {
				/* Call-count-based delay */
				c->fail_delay = 2;
				c->fail_cnt_max = delay;
				dbg_rcvry("failing after %u calls", delay);
			}
		}
		c->fail_cnt += 1;
	}
	/* Determine if failure delay has expired */
	if (c->fail_delay == 1) {
		if (time_before(jiffies, c->fail_timeout))
			return 0;
	} else if (c->fail_delay == 2)
		if (c->fail_cnt++ < c->fail_cnt_max)
			return 0;
	/*
	 * Delay expired - pick a failure probability depending on which LEB
	 * area is being touched. Falling through a chance() test without
	 * returning means "fail now".
	 */
	if (lnum == UBIFS_SB_LNUM) {
		if (write) {
			if (chance(1, 2))
				return 0;
		} else if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in super block LEB %d", lnum);
	} else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) {
		if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in master LEB %d", lnum);
	} else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) {
		if (write) {
			if (chance(99, 100))
				return 0;
		} else if (chance(399, 400))
			return 0;
		dbg_rcvry("failing in log LEB %d", lnum);
	} else if (lnum >= c->lpt_first && lnum <= c->lpt_last) {
		if (write) {
			if (chance(7, 8))
				return 0;
		} else if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in LPT LEB %d", lnum);
	} else if (lnum >= c->orph_first && lnum <= c->orph_last) {
		if (write) {
			if (chance(1, 2))
				return 0;
		} else if (chance(9, 10))
			return 0;
		dbg_rcvry("failing in orphan LEB %d", lnum);
	} else if (lnum == c->ihead_lnum) {
		if (chance(99, 100))
			return 0;
		dbg_rcvry("failing in index head LEB %d", lnum);
	} else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) {
		if (chance(9, 10))
			return 0;
		dbg_rcvry("failing in GC head LEB %d", lnum);
	} else if (write && !RB_EMPTY_ROOT(&c->buds) &&
		   !ubifs_search_bud(c, lnum)) {
		if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in non-bud LEB %d", lnum);
	} else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND ||
		   c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		if (chance(999, 1000))
			return 0;
		dbg_rcvry("failing in bud LEB %d commit running", lnum);
	} else {
		if (chance(9999, 10000))
			return 0;
		dbg_rcvry("failing in bud LEB %d commit not running", lnum);
	}
	ubifs_err("*** SETTING FAILURE MODE ON (LEB %d) ***", lnum);
	c->failure_mode = 1;
	dump_stack();
	return 1;
}
  2028. static void cut_data(const void *buf, int len)
  2029. {
  2030. int flen, i;
  2031. unsigned char *p = (void *)buf;
  2032. flen = (len * (long long)simple_rand()) >> 15;
  2033. for (i = flen; i < len; i++)
  2034. p[i] = 0xff;
  2035. }
  2036. int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
  2037. int len, int check)
  2038. {
  2039. if (in_failure_mode(desc))
  2040. return -EIO;
  2041. return ubi_leb_read(desc, lnum, buf, offset, len, check);
  2042. }
  2043. int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
  2044. int offset, int len, int dtype)
  2045. {
  2046. int err, failing;
  2047. if (in_failure_mode(desc))
  2048. return -EIO;
  2049. failing = do_fail(desc, lnum, 1);
  2050. if (failing)
  2051. cut_data(buf, len);
  2052. err = ubi_leb_write(desc, lnum, buf, offset, len, dtype);
  2053. if (err)
  2054. return err;
  2055. if (failing)
  2056. return -EIO;
  2057. return 0;
  2058. }
  2059. int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
  2060. int len, int dtype)
  2061. {
  2062. int err;
  2063. if (do_fail(desc, lnum, 1))
  2064. return -EIO;
  2065. err = ubi_leb_change(desc, lnum, buf, len, dtype);
  2066. if (err)
  2067. return err;
  2068. if (do_fail(desc, lnum, 1))
  2069. return -EIO;
  2070. return 0;
  2071. }
  2072. int dbg_leb_erase(struct ubi_volume_desc *desc, int lnum)
  2073. {
  2074. int err;
  2075. if (do_fail(desc, lnum, 0))
  2076. return -EIO;
  2077. err = ubi_leb_erase(desc, lnum);
  2078. if (err)
  2079. return err;
  2080. if (do_fail(desc, lnum, 0))
  2081. return -EIO;
  2082. return 0;
  2083. }
  2084. int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum)
  2085. {
  2086. int err;
  2087. if (do_fail(desc, lnum, 0))
  2088. return -EIO;
  2089. err = ubi_leb_unmap(desc, lnum);
  2090. if (err)
  2091. return err;
  2092. if (do_fail(desc, lnum, 0))
  2093. return -EIO;
  2094. return 0;
  2095. }
  2096. int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum)
  2097. {
  2098. if (in_failure_mode(desc))
  2099. return -EIO;
  2100. return ubi_is_mapped(desc, lnum);
  2101. }
  2102. int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
  2103. {
  2104. int err;
  2105. if (do_fail(desc, lnum, 0))
  2106. return -EIO;
  2107. err = ubi_leb_map(desc, lnum, dtype);
  2108. if (err)
  2109. return err;
  2110. if (do_fail(desc, lnum, 0))
  2111. return -EIO;
  2112. return 0;
  2113. }
  2114. #endif /* CONFIG_UBIFS_FS_DEBUG */