  1. /*
  2. * This file is part of UBIFS.
  3. *
  4. * Copyright (C) 2006-2008 Nokia Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program; if not, write to the Free Software Foundation, Inc., 51
  17. * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  18. *
  19. * Authors: Artem Bityutskiy (Битюцкий Артём)
  20. * Adrian Hunter
  21. */
  22. /*
  23. * This file implements most of the debugging stuff which is compiled in only
  24. * when it is enabled. But some debugging check functions are implemented in
  25. * the corresponding subsystems, because they are closely related and utilize
  26. * various local functions of those subsystems.
  27. */
  28. #define UBIFS_DBG_PRESERVE_UBI
  29. #include "ubifs.h"
  30. #include <linux/module.h>
  31. #include <linux/moduleparam.h>
  32. #include <linux/debugfs.h>
  33. #include <linux/math64.h>
  34. #include <linux/slab.h>
  35. #ifdef CONFIG_UBIFS_FS_DEBUG
  36. DEFINE_SPINLOCK(dbg_lock);
  37. static char dbg_key_buf0[128];
  38. static char dbg_key_buf1[128];
  39. unsigned int ubifs_msg_flags;
  40. unsigned int ubifs_chk_flags;
  41. unsigned int ubifs_tst_flags;
  42. module_param_named(debug_msgs, ubifs_msg_flags, uint, S_IRUGO | S_IWUSR);
  43. module_param_named(debug_chks, ubifs_chk_flags, uint, S_IRUGO | S_IWUSR);
  44. module_param_named(debug_tsts, ubifs_tst_flags, uint, S_IRUGO | S_IWUSR);
  45. MODULE_PARM_DESC(debug_msgs, "Debug message type flags");
  46. MODULE_PARM_DESC(debug_chks, "Debug check flags");
  47. MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
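/*
 * Usage (illustrative): because the parameters above are declared with
 * S_IRUGO | S_IWUSR, they may be set at module load time, for example
 * "modprobe ubifs debug_chks=3", or changed at run time through sysfs,
 * for example "echo 3 > /sys/module/ubifs/parameters/debug_chks". The
 * individual bits correspond to the UBIFS_MSG_*, UBIFS_CHK_* and
 * UBIFS_TST_* flag definitions in debug.h; the value 3 is an arbitrary
 * example.
 */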
  48. static const char *get_key_fmt(int fmt)
  49. {
  50. switch (fmt) {
  51. case UBIFS_SIMPLE_KEY_FMT:
  52. return "simple";
  53. default:
  54. return "unknown/invalid format";
  55. }
  56. }
  57. static const char *get_key_hash(int hash)
  58. {
  59. switch (hash) {
  60. case UBIFS_KEY_HASH_R5:
  61. return "R5";
  62. case UBIFS_KEY_HASH_TEST:
  63. return "test";
  64. default:
  65. return "unknown/invalid name hash";
  66. }
  67. }
  68. static const char *get_key_type(int type)
  69. {
  70. switch (type) {
  71. case UBIFS_INO_KEY:
  72. return "inode";
  73. case UBIFS_DENT_KEY:
  74. return "direntry";
  75. case UBIFS_XENT_KEY:
  76. return "xentry";
  77. case UBIFS_DATA_KEY:
  78. return "data";
  79. case UBIFS_TRUN_KEY:
  80. return "truncate";
  81. default:
  82. return "unknown/invalid key";
  83. }
  84. }
  85. static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
  86. char *buffer)
  87. {
  88. char *p = buffer;
  89. int type = key_type(c, key);
  90. if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
  91. switch (type) {
  92. case UBIFS_INO_KEY:
  93. sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
  94. get_key_type(type));
  95. break;
  96. case UBIFS_DENT_KEY:
  97. case UBIFS_XENT_KEY:
  98. sprintf(p, "(%lu, %s, %#08x)",
  99. (unsigned long)key_inum(c, key),
  100. get_key_type(type), key_hash(c, key));
  101. break;
  102. case UBIFS_DATA_KEY:
  103. sprintf(p, "(%lu, %s, %u)",
  104. (unsigned long)key_inum(c, key),
  105. get_key_type(type), key_block(c, key));
  106. break;
  107. case UBIFS_TRUN_KEY:
  108. sprintf(p, "(%lu, %s)",
  109. (unsigned long)key_inum(c, key),
  110. get_key_type(type));
  111. break;
  112. default:
  113. sprintf(p, "(bad key type: %#08x, %#08x)",
  114. key->u32[0], key->u32[1]);
  115. }
  116. } else
  117. sprintf(p, "bad key format %d", c->key_fmt);
  118. }
  119. const char *dbg_key_str0(const struct ubifs_info *c, const union ubifs_key *key)
  120. {
  121. /* dbg_lock must be held */
  122. sprintf_key(c, key, dbg_key_buf0);
  123. return dbg_key_buf0;
  124. }
  125. const char *dbg_key_str1(const struct ubifs_info *c, const union ubifs_key *key)
  126. {
  127. /* dbg_lock must be held */
  128. sprintf_key(c, key, dbg_key_buf1);
  129. return dbg_key_buf1;
  130. }
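/*
 * Note: dbg_key_str0() and dbg_key_str1() format keys into the two static
 * buffers above, which is why they may only be called while dbg_lock is
 * held (they are normally used via the DBGKEY() and DBGKEY1() macros from
 * debug.h). This is also why the dump functions below take dbg_lock around
 * their printk() calls.
 */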
  131. const char *dbg_ntype(int type)
  132. {
  133. switch (type) {
  134. case UBIFS_PAD_NODE:
  135. return "padding node";
  136. case UBIFS_SB_NODE:
  137. return "superblock node";
  138. case UBIFS_MST_NODE:
  139. return "master node";
  140. case UBIFS_REF_NODE:
  141. return "reference node";
  142. case UBIFS_INO_NODE:
  143. return "inode node";
  144. case UBIFS_DENT_NODE:
  145. return "direntry node";
  146. case UBIFS_XENT_NODE:
  147. return "xentry node";
  148. case UBIFS_DATA_NODE:
  149. return "data node";
  150. case UBIFS_TRUN_NODE:
  151. return "truncate node";
  152. case UBIFS_IDX_NODE:
  153. return "indexing node";
  154. case UBIFS_CS_NODE:
  155. return "commit start node";
  156. case UBIFS_ORPH_NODE:
  157. return "orphan node";
  158. default:
  159. return "unknown node";
  160. }
  161. }
  162. static const char *dbg_gtype(int type)
  163. {
  164. switch (type) {
  165. case UBIFS_NO_NODE_GROUP:
  166. return "no node group";
  167. case UBIFS_IN_NODE_GROUP:
  168. return "in node group";
  169. case UBIFS_LAST_OF_NODE_GROUP:
  170. return "last of node group";
  171. default:
  172. return "unknown";
  173. }
  174. }
  175. const char *dbg_cstate(int cmt_state)
  176. {
  177. switch (cmt_state) {
  178. case COMMIT_RESTING:
  179. return "commit resting";
  180. case COMMIT_BACKGROUND:
  181. return "background commit requested";
  182. case COMMIT_REQUIRED:
  183. return "commit required";
  184. case COMMIT_RUNNING_BACKGROUND:
  185. return "BACKGROUND commit running";
  186. case COMMIT_RUNNING_REQUIRED:
  187. return "commit running and required";
  188. case COMMIT_BROKEN:
  189. return "broken commit";
  190. default:
  191. return "unknown commit state";
  192. }
  193. }
  194. const char *dbg_jhead(int jhead)
  195. {
  196. switch (jhead) {
  197. case GCHD:
  198. return "0 (GC)";
  199. case BASEHD:
  200. return "1 (base)";
  201. case DATAHD:
  202. return "2 (data)";
  203. default:
  204. return "unknown journal head";
  205. }
  206. }
  207. static void dump_ch(const struct ubifs_ch *ch)
  208. {
  209. printk(KERN_DEBUG "\tmagic %#x\n", le32_to_cpu(ch->magic));
  210. printk(KERN_DEBUG "\tcrc %#x\n", le32_to_cpu(ch->crc));
  211. printk(KERN_DEBUG "\tnode_type %d (%s)\n", ch->node_type,
  212. dbg_ntype(ch->node_type));
  213. printk(KERN_DEBUG "\tgroup_type %d (%s)\n", ch->group_type,
  214. dbg_gtype(ch->group_type));
  215. printk(KERN_DEBUG "\tsqnum %llu\n",
  216. (unsigned long long)le64_to_cpu(ch->sqnum));
  217. printk(KERN_DEBUG "\tlen %u\n", le32_to_cpu(ch->len));
  218. }
  219. void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode)
  220. {
  221. const struct ubifs_inode *ui = ubifs_inode(inode);
  222. printk(KERN_DEBUG "Dump in-memory inode:");
  223. printk(KERN_DEBUG "\tinode %lu\n", inode->i_ino);
  224. printk(KERN_DEBUG "\tsize %llu\n",
  225. (unsigned long long)i_size_read(inode));
  226. printk(KERN_DEBUG "\tnlink %u\n", inode->i_nlink);
  227. printk(KERN_DEBUG "\tuid %u\n", (unsigned int)inode->i_uid);
  228. printk(KERN_DEBUG "\tgid %u\n", (unsigned int)inode->i_gid);
  229. printk(KERN_DEBUG "\tatime %u.%u\n",
  230. (unsigned int)inode->i_atime.tv_sec,
  231. (unsigned int)inode->i_atime.tv_nsec);
  232. printk(KERN_DEBUG "\tmtime %u.%u\n",
  233. (unsigned int)inode->i_mtime.tv_sec,
  234. (unsigned int)inode->i_mtime.tv_nsec);
  235. printk(KERN_DEBUG "\tctime %u.%u\n",
  236. (unsigned int)inode->i_ctime.tv_sec,
  237. (unsigned int)inode->i_ctime.tv_nsec);
  238. printk(KERN_DEBUG "\tcreat_sqnum %llu\n", ui->creat_sqnum);
  239. printk(KERN_DEBUG "\txattr_size %u\n", ui->xattr_size);
  240. printk(KERN_DEBUG "\txattr_cnt %u\n", ui->xattr_cnt);
  241. printk(KERN_DEBUG "\txattr_names %u\n", ui->xattr_names);
  242. printk(KERN_DEBUG "\tdirty %u\n", ui->dirty);
  243. printk(KERN_DEBUG "\txattr %u\n", ui->xattr);
  244. printk(KERN_DEBUG "\tbulk_read %u\n", ui->xattr);
  245. printk(KERN_DEBUG "\tsynced_i_size %llu\n",
  246. (unsigned long long)ui->synced_i_size);
  247. printk(KERN_DEBUG "\tui_size %llu\n",
  248. (unsigned long long)ui->ui_size);
  249. printk(KERN_DEBUG "\tflags %d\n", ui->flags);
  250. printk(KERN_DEBUG "\tcompr_type %d\n", ui->compr_type);
  251. printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read);
  252. printk(KERN_DEBUG "\tread_in_a_row %lu\n", ui->read_in_a_row);
  253. printk(KERN_DEBUG "\tdata_len %d\n", ui->data_len);
  254. }
  255. void dbg_dump_node(const struct ubifs_info *c, const void *node)
  256. {
  257. int i, n;
  258. union ubifs_key key;
  259. const struct ubifs_ch *ch = node;
  260. if (dbg_failure_mode)
  261. return;
  262. /* If the magic is incorrect, just hexdump the first bytes */
  263. if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) {
  264. printk(KERN_DEBUG "Not a node, first %zu bytes:", UBIFS_CH_SZ);
  265. print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
  266. (void *)node, UBIFS_CH_SZ, 1);
  267. return;
  268. }
  269. spin_lock(&dbg_lock);
  270. dump_ch(node);
  271. switch (ch->node_type) {
  272. case UBIFS_PAD_NODE:
  273. {
  274. const struct ubifs_pad_node *pad = node;
  275. printk(KERN_DEBUG "\tpad_len %u\n",
  276. le32_to_cpu(pad->pad_len));
  277. break;
  278. }
  279. case UBIFS_SB_NODE:
  280. {
  281. const struct ubifs_sb_node *sup = node;
  282. unsigned int sup_flags = le32_to_cpu(sup->flags);
  283. printk(KERN_DEBUG "\tkey_hash %d (%s)\n",
  284. (int)sup->key_hash, get_key_hash(sup->key_hash));
  285. printk(KERN_DEBUG "\tkey_fmt %d (%s)\n",
  286. (int)sup->key_fmt, get_key_fmt(sup->key_fmt));
  287. printk(KERN_DEBUG "\tflags %#x\n", sup_flags);
  288. printk(KERN_DEBUG "\t big_lpt %u\n",
  289. !!(sup_flags & UBIFS_FLG_BIGLPT));
  290. printk(KERN_DEBUG "\tmin_io_size %u\n",
  291. le32_to_cpu(sup->min_io_size));
  292. printk(KERN_DEBUG "\tleb_size %u\n",
  293. le32_to_cpu(sup->leb_size));
  294. printk(KERN_DEBUG "\tleb_cnt %u\n",
  295. le32_to_cpu(sup->leb_cnt));
  296. printk(KERN_DEBUG "\tmax_leb_cnt %u\n",
  297. le32_to_cpu(sup->max_leb_cnt));
  298. printk(KERN_DEBUG "\tmax_bud_bytes %llu\n",
  299. (unsigned long long)le64_to_cpu(sup->max_bud_bytes));
  300. printk(KERN_DEBUG "\tlog_lebs %u\n",
  301. le32_to_cpu(sup->log_lebs));
  302. printk(KERN_DEBUG "\tlpt_lebs %u\n",
  303. le32_to_cpu(sup->lpt_lebs));
  304. printk(KERN_DEBUG "\torph_lebs %u\n",
  305. le32_to_cpu(sup->orph_lebs));
  306. printk(KERN_DEBUG "\tjhead_cnt %u\n",
  307. le32_to_cpu(sup->jhead_cnt));
  308. printk(KERN_DEBUG "\tfanout %u\n",
  309. le32_to_cpu(sup->fanout));
  310. printk(KERN_DEBUG "\tlsave_cnt %u\n",
  311. le32_to_cpu(sup->lsave_cnt));
  312. printk(KERN_DEBUG "\tdefault_compr %u\n",
  313. (int)le16_to_cpu(sup->default_compr));
  314. printk(KERN_DEBUG "\trp_size %llu\n",
  315. (unsigned long long)le64_to_cpu(sup->rp_size));
  316. printk(KERN_DEBUG "\trp_uid %u\n",
  317. le32_to_cpu(sup->rp_uid));
  318. printk(KERN_DEBUG "\trp_gid %u\n",
  319. le32_to_cpu(sup->rp_gid));
  320. printk(KERN_DEBUG "\tfmt_version %u\n",
  321. le32_to_cpu(sup->fmt_version));
  322. printk(KERN_DEBUG "\ttime_gran %u\n",
  323. le32_to_cpu(sup->time_gran));
  324. printk(KERN_DEBUG "\tUUID %pUB\n",
  325. sup->uuid);
  326. break;
  327. }
  328. case UBIFS_MST_NODE:
  329. {
  330. const struct ubifs_mst_node *mst = node;
  331. printk(KERN_DEBUG "\thighest_inum %llu\n",
  332. (unsigned long long)le64_to_cpu(mst->highest_inum));
  333. printk(KERN_DEBUG "\tcommit number %llu\n",
  334. (unsigned long long)le64_to_cpu(mst->cmt_no));
  335. printk(KERN_DEBUG "\tflags %#x\n",
  336. le32_to_cpu(mst->flags));
  337. printk(KERN_DEBUG "\tlog_lnum %u\n",
  338. le32_to_cpu(mst->log_lnum));
  339. printk(KERN_DEBUG "\troot_lnum %u\n",
  340. le32_to_cpu(mst->root_lnum));
  341. printk(KERN_DEBUG "\troot_offs %u\n",
  342. le32_to_cpu(mst->root_offs));
  343. printk(KERN_DEBUG "\troot_len %u\n",
  344. le32_to_cpu(mst->root_len));
  345. printk(KERN_DEBUG "\tgc_lnum %u\n",
  346. le32_to_cpu(mst->gc_lnum));
  347. printk(KERN_DEBUG "\tihead_lnum %u\n",
  348. le32_to_cpu(mst->ihead_lnum));
  349. printk(KERN_DEBUG "\tihead_offs %u\n",
  350. le32_to_cpu(mst->ihead_offs));
  351. printk(KERN_DEBUG "\tindex_size %llu\n",
  352. (unsigned long long)le64_to_cpu(mst->index_size));
  353. printk(KERN_DEBUG "\tlpt_lnum %u\n",
  354. le32_to_cpu(mst->lpt_lnum));
  355. printk(KERN_DEBUG "\tlpt_offs %u\n",
  356. le32_to_cpu(mst->lpt_offs));
  357. printk(KERN_DEBUG "\tnhead_lnum %u\n",
  358. le32_to_cpu(mst->nhead_lnum));
  359. printk(KERN_DEBUG "\tnhead_offs %u\n",
  360. le32_to_cpu(mst->nhead_offs));
  361. printk(KERN_DEBUG "\tltab_lnum %u\n",
  362. le32_to_cpu(mst->ltab_lnum));
  363. printk(KERN_DEBUG "\tltab_offs %u\n",
  364. le32_to_cpu(mst->ltab_offs));
  365. printk(KERN_DEBUG "\tlsave_lnum %u\n",
  366. le32_to_cpu(mst->lsave_lnum));
  367. printk(KERN_DEBUG "\tlsave_offs %u\n",
  368. le32_to_cpu(mst->lsave_offs));
  369. printk(KERN_DEBUG "\tlscan_lnum %u\n",
  370. le32_to_cpu(mst->lscan_lnum));
  371. printk(KERN_DEBUG "\tleb_cnt %u\n",
  372. le32_to_cpu(mst->leb_cnt));
  373. printk(KERN_DEBUG "\tempty_lebs %u\n",
  374. le32_to_cpu(mst->empty_lebs));
  375. printk(KERN_DEBUG "\tidx_lebs %u\n",
  376. le32_to_cpu(mst->idx_lebs));
  377. printk(KERN_DEBUG "\ttotal_free %llu\n",
  378. (unsigned long long)le64_to_cpu(mst->total_free));
  379. printk(KERN_DEBUG "\ttotal_dirty %llu\n",
  380. (unsigned long long)le64_to_cpu(mst->total_dirty));
  381. printk(KERN_DEBUG "\ttotal_used %llu\n",
  382. (unsigned long long)le64_to_cpu(mst->total_used));
  383. printk(KERN_DEBUG "\ttotal_dead %llu\n",
  384. (unsigned long long)le64_to_cpu(mst->total_dead));
  385. printk(KERN_DEBUG "\ttotal_dark %llu\n",
  386. (unsigned long long)le64_to_cpu(mst->total_dark));
  387. break;
  388. }
  389. case UBIFS_REF_NODE:
  390. {
  391. const struct ubifs_ref_node *ref = node;
  392. printk(KERN_DEBUG "\tlnum %u\n",
  393. le32_to_cpu(ref->lnum));
  394. printk(KERN_DEBUG "\toffs %u\n",
  395. le32_to_cpu(ref->offs));
  396. printk(KERN_DEBUG "\tjhead %u\n",
  397. le32_to_cpu(ref->jhead));
  398. break;
  399. }
  400. case UBIFS_INO_NODE:
  401. {
  402. const struct ubifs_ino_node *ino = node;
  403. key_read(c, &ino->key, &key);
  404. printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
  405. printk(KERN_DEBUG "\tcreat_sqnum %llu\n",
  406. (unsigned long long)le64_to_cpu(ino->creat_sqnum));
  407. printk(KERN_DEBUG "\tsize %llu\n",
  408. (unsigned long long)le64_to_cpu(ino->size));
  409. printk(KERN_DEBUG "\tnlink %u\n",
  410. le32_to_cpu(ino->nlink));
  411. printk(KERN_DEBUG "\tatime %lld.%u\n",
  412. (long long)le64_to_cpu(ino->atime_sec),
  413. le32_to_cpu(ino->atime_nsec));
  414. printk(KERN_DEBUG "\tmtime %lld.%u\n",
  415. (long long)le64_to_cpu(ino->mtime_sec),
  416. le32_to_cpu(ino->mtime_nsec));
  417. printk(KERN_DEBUG "\tctime %lld.%u\n",
  418. (long long)le64_to_cpu(ino->ctime_sec),
  419. le32_to_cpu(ino->ctime_nsec));
  420. printk(KERN_DEBUG "\tuid %u\n",
  421. le32_to_cpu(ino->uid));
  422. printk(KERN_DEBUG "\tgid %u\n",
  423. le32_to_cpu(ino->gid));
  424. printk(KERN_DEBUG "\tmode %u\n",
  425. le32_to_cpu(ino->mode));
  426. printk(KERN_DEBUG "\tflags %#x\n",
  427. le32_to_cpu(ino->flags));
  428. printk(KERN_DEBUG "\txattr_cnt %u\n",
  429. le32_to_cpu(ino->xattr_cnt));
  430. printk(KERN_DEBUG "\txattr_size %u\n",
  431. le32_to_cpu(ino->xattr_size));
  432. printk(KERN_DEBUG "\txattr_names %u\n",
  433. le32_to_cpu(ino->xattr_names));
  434. printk(KERN_DEBUG "\tcompr_type %#x\n",
  435. (int)le16_to_cpu(ino->compr_type));
  436. printk(KERN_DEBUG "\tdata len %u\n",
  437. le32_to_cpu(ino->data_len));
  438. break;
  439. }
  440. case UBIFS_DENT_NODE:
  441. case UBIFS_XENT_NODE:
  442. {
  443. const struct ubifs_dent_node *dent = node;
  444. int nlen = le16_to_cpu(dent->nlen);
  445. key_read(c, &dent->key, &key);
  446. printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
  447. printk(KERN_DEBUG "\tinum %llu\n",
  448. (unsigned long long)le64_to_cpu(dent->inum));
  449. printk(KERN_DEBUG "\ttype %d\n", (int)dent->type);
  450. printk(KERN_DEBUG "\tnlen %d\n", nlen);
  451. printk(KERN_DEBUG "\tname ");
  452. if (nlen > UBIFS_MAX_NLEN)
  453. printk(KERN_DEBUG "(bad name length, not printing, "
  454. "bad or corrupted node)");
  455. else {
  456. for (i = 0; i < nlen && dent->name[i]; i++)
  457. printk(KERN_CONT "%c", dent->name[i]);
  458. }
  459. printk(KERN_CONT "\n");
  460. break;
  461. }
  462. case UBIFS_DATA_NODE:
  463. {
  464. const struct ubifs_data_node *dn = node;
  465. int dlen = le32_to_cpu(ch->len) - UBIFS_DATA_NODE_SZ;
  466. key_read(c, &dn->key, &key);
  467. printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
  468. printk(KERN_DEBUG "\tsize %u\n",
  469. le32_to_cpu(dn->size));
  470. printk(KERN_DEBUG "\tcompr_typ %d\n",
  471. (int)le16_to_cpu(dn->compr_type));
  472. printk(KERN_DEBUG "\tdata size %d\n",
  473. dlen);
  474. printk(KERN_DEBUG "\tdata:\n");
  475. print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET, 32, 1,
  476. (void *)&dn->data, dlen, 0);
  477. break;
  478. }
  479. case UBIFS_TRUN_NODE:
  480. {
  481. const struct ubifs_trun_node *trun = node;
  482. printk(KERN_DEBUG "\tinum %u\n",
  483. le32_to_cpu(trun->inum));
  484. printk(KERN_DEBUG "\told_size %llu\n",
  485. (unsigned long long)le64_to_cpu(trun->old_size));
  486. printk(KERN_DEBUG "\tnew_size %llu\n",
  487. (unsigned long long)le64_to_cpu(trun->new_size));
  488. break;
  489. }
  490. case UBIFS_IDX_NODE:
  491. {
  492. const struct ubifs_idx_node *idx = node;
  493. n = le16_to_cpu(idx->child_cnt);
  494. printk(KERN_DEBUG "\tchild_cnt %d\n", n);
  495. printk(KERN_DEBUG "\tlevel %d\n",
  496. (int)le16_to_cpu(idx->level));
  497. printk(KERN_DEBUG "\tBranches:\n");
  498. for (i = 0; i < n && i < c->fanout - 1; i++) {
  499. const struct ubifs_branch *br;
  500. br = ubifs_idx_branch(c, idx, i);
  501. key_read(c, &br->key, &key);
  502. printk(KERN_DEBUG "\t%d: LEB %d:%d len %d key %s\n",
  503. i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs),
  504. le32_to_cpu(br->len), DBGKEY(&key));
  505. }
  506. break;
  507. }
  508. case UBIFS_CS_NODE:
  509. break;
  510. case UBIFS_ORPH_NODE:
  511. {
  512. const struct ubifs_orph_node *orph = node;
  513. printk(KERN_DEBUG "\tcommit number %llu\n",
  514. (unsigned long long)
  515. le64_to_cpu(orph->cmt_no) & LLONG_MAX);
  516. printk(KERN_DEBUG "\tlast node flag %llu\n",
  517. (unsigned long long)(le64_to_cpu(orph->cmt_no)) >> 63);
  518. n = (le32_to_cpu(ch->len) - UBIFS_ORPH_NODE_SZ) >> 3;
  519. printk(KERN_DEBUG "\t%d orphan inode numbers:\n", n);
  520. for (i = 0; i < n; i++)
  521. printk(KERN_DEBUG "\t ino %llu\n",
  522. (unsigned long long)le64_to_cpu(orph->inos[i]));
  523. break;
  524. }
  525. default:
  526. printk(KERN_DEBUG "node type %d was not recognized\n",
  527. (int)ch->node_type);
  528. }
  529. spin_unlock(&dbg_lock);
  530. }
  531. void dbg_dump_budget_req(const struct ubifs_budget_req *req)
  532. {
  533. spin_lock(&dbg_lock);
  534. printk(KERN_DEBUG "Budgeting request: new_ino %d, dirtied_ino %d\n",
  535. req->new_ino, req->dirtied_ino);
  536. printk(KERN_DEBUG "\tnew_ino_d %d, dirtied_ino_d %d\n",
  537. req->new_ino_d, req->dirtied_ino_d);
  538. printk(KERN_DEBUG "\tnew_page %d, dirtied_page %d\n",
  539. req->new_page, req->dirtied_page);
  540. printk(KERN_DEBUG "\tnew_dent %d, mod_dent %d\n",
  541. req->new_dent, req->mod_dent);
  542. printk(KERN_DEBUG "\tidx_growth %d\n", req->idx_growth);
  543. printk(KERN_DEBUG "\tdata_growth %d dd_growth %d\n",
  544. req->data_growth, req->dd_growth);
  545. spin_unlock(&dbg_lock);
  546. }
  547. void dbg_dump_lstats(const struct ubifs_lp_stats *lst)
  548. {
  549. spin_lock(&dbg_lock);
  550. printk(KERN_DEBUG "(pid %d) Lprops statistics: empty_lebs %d, "
  551. "idx_lebs %d\n", current->pid, lst->empty_lebs, lst->idx_lebs);
  552. printk(KERN_DEBUG "\ttaken_empty_lebs %d, total_free %lld, "
  553. "total_dirty %lld\n", lst->taken_empty_lebs, lst->total_free,
  554. lst->total_dirty);
  555. printk(KERN_DEBUG "\ttotal_used %lld, total_dark %lld, "
  556. "total_dead %lld\n", lst->total_used, lst->total_dark,
  557. lst->total_dead);
  558. spin_unlock(&dbg_lock);
  559. }
  560. void dbg_dump_budg(struct ubifs_info *c)
  561. {
  562. int i;
  563. struct rb_node *rb;
  564. struct ubifs_bud *bud;
  565. struct ubifs_gced_idx_leb *idx_gc;
  566. long long available, outstanding, free;
  567. ubifs_assert(spin_is_locked(&c->space_lock));
  568. spin_lock(&dbg_lock);
  569. printk(KERN_DEBUG "(pid %d) Budgeting info: budg_data_growth %lld, "
  570. "budg_dd_growth %lld, budg_idx_growth %lld\n", current->pid,
  571. c->budg_data_growth, c->budg_dd_growth, c->budg_idx_growth);
  572. printk(KERN_DEBUG "\tdata budget sum %lld, total budget sum %lld, "
  573. "freeable_cnt %d\n", c->budg_data_growth + c->budg_dd_growth,
  574. c->budg_data_growth + c->budg_dd_growth + c->budg_idx_growth,
  575. c->freeable_cnt);
  576. printk(KERN_DEBUG "\tmin_idx_lebs %d, old_idx_sz %lld, "
  577. "calc_idx_sz %lld, idx_gc_cnt %d\n", c->min_idx_lebs,
  578. c->old_idx_sz, c->calc_idx_sz, c->idx_gc_cnt);
  579. printk(KERN_DEBUG "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, "
  580. "clean_zn_cnt %ld\n", atomic_long_read(&c->dirty_pg_cnt),
  581. atomic_long_read(&c->dirty_zn_cnt),
  582. atomic_long_read(&c->clean_zn_cnt));
  583. printk(KERN_DEBUG "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
  584. c->dark_wm, c->dead_wm, c->max_idx_node_sz);
  585. printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n",
  586. c->gc_lnum, c->ihead_lnum);
  587. /* If we are in R/O mode, journal heads do not exist */
  588. if (c->jheads)
  589. for (i = 0; i < c->jhead_cnt; i++)
  590. printk(KERN_DEBUG "\tjhead %s\t LEB %d\n",
  591. dbg_jhead(c->jheads[i].wbuf.jhead),
  592. c->jheads[i].wbuf.lnum);
  593. for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
  594. bud = rb_entry(rb, struct ubifs_bud, rb);
  595. printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
  596. }
  597. list_for_each_entry(bud, &c->old_buds, list)
  598. printk(KERN_DEBUG "\told bud LEB %d\n", bud->lnum);
  599. list_for_each_entry(idx_gc, &c->idx_gc, list)
  600. printk(KERN_DEBUG "\tGC'ed idx LEB %d unmap %d\n",
  601. idx_gc->lnum, idx_gc->unmap);
  602. printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state);
  603. /* Print budgeting predictions */
  604. available = ubifs_calc_available(c, c->min_idx_lebs);
  605. outstanding = c->budg_data_growth + c->budg_dd_growth;
  606. free = ubifs_get_free_space_nolock(c);
  607. printk(KERN_DEBUG "Budgeting predictions:\n");
  608. printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n",
  609. available, outstanding, free);
  610. spin_unlock(&dbg_lock);
  611. }
  612. void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
  613. {
  614. int i, spc, dark = 0, dead = 0;
  615. struct rb_node *rb;
  616. struct ubifs_bud *bud;
  617. spc = lp->free + lp->dirty;
  618. if (spc < c->dead_wm)
  619. dead = spc;
  620. else
  621. dark = ubifs_calc_dark(c, spc);
  622. if (lp->flags & LPROPS_INDEX)
  623. printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
  624. "free + dirty %-8d flags %#x (", lp->lnum, lp->free,
  625. lp->dirty, c->leb_size - spc, spc, lp->flags);
  626. else
  627. printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
  628. "free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d "
  629. "flags %#-4x (", lp->lnum, lp->free, lp->dirty,
  630. c->leb_size - spc, spc, dark, dead,
  631. (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);
  632. if (lp->flags & LPROPS_TAKEN) {
  633. if (lp->flags & LPROPS_INDEX)
  634. printk(KERN_CONT "index, taken");
  635. else
  636. printk(KERN_CONT "taken");
  637. } else {
  638. const char *s;
  639. if (lp->flags & LPROPS_INDEX) {
  640. switch (lp->flags & LPROPS_CAT_MASK) {
  641. case LPROPS_DIRTY_IDX:
  642. s = "dirty index";
  643. break;
  644. case LPROPS_FRDI_IDX:
  645. s = "freeable index";
  646. break;
  647. default:
  648. s = "index";
  649. }
  650. } else {
  651. switch (lp->flags & LPROPS_CAT_MASK) {
  652. case LPROPS_UNCAT:
  653. s = "not categorized";
  654. break;
  655. case LPROPS_DIRTY:
  656. s = "dirty";
  657. break;
  658. case LPROPS_FREE:
  659. s = "free";
  660. break;
  661. case LPROPS_EMPTY:
  662. s = "empty";
  663. break;
  664. case LPROPS_FREEABLE:
  665. s = "freeable";
  666. break;
  667. default:
  668. s = NULL;
  669. break;
  670. }
  671. }
  672. printk(KERN_CONT "%s", s);
  673. }
  674. for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
  675. bud = rb_entry(rb, struct ubifs_bud, rb);
  676. if (bud->lnum == lp->lnum) {
  677. int head = 0;
  678. for (i = 0; i < c->jhead_cnt; i++) {
  679. if (lp->lnum == c->jheads[i].wbuf.lnum) {
  680. printk(KERN_CONT ", jhead %s",
  681. dbg_jhead(i));
  682. head = 1;
  683. }
  684. }
  685. if (!head)
  686. printk(KERN_CONT ", bud of jhead %s",
  687. dbg_jhead(bud->jhead));
  688. }
  689. }
  690. if (lp->lnum == c->gc_lnum)
  691. printk(KERN_CONT ", GC LEB");
  692. printk(KERN_CONT ")\n");
  693. }
  694. void dbg_dump_lprops(struct ubifs_info *c)
  695. {
  696. int lnum, err;
  697. struct ubifs_lprops lp;
  698. struct ubifs_lp_stats lst;
  699. printk(KERN_DEBUG "(pid %d) start dumping LEB properties\n",
  700. current->pid);
  701. ubifs_get_lp_stats(c, &lst);
  702. dbg_dump_lstats(&lst);
  703. for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
  704. err = ubifs_read_one_lp(c, lnum, &lp);
  705. if (err)
  706. ubifs_err("cannot read lprops for LEB %d", lnum);
  707. dbg_dump_lprop(c, &lp);
  708. }
  709. printk(KERN_DEBUG "(pid %d) finish dumping LEB properties\n",
  710. current->pid);
  711. }
  712. void dbg_dump_lpt_info(struct ubifs_info *c)
  713. {
  714. int i;
  715. spin_lock(&dbg_lock);
  716. printk(KERN_DEBUG "(pid %d) dumping LPT information\n", current->pid);
  717. printk(KERN_DEBUG "\tlpt_sz: %lld\n", c->lpt_sz);
  718. printk(KERN_DEBUG "\tpnode_sz: %d\n", c->pnode_sz);
  719. printk(KERN_DEBUG "\tnnode_sz: %d\n", c->nnode_sz);
  720. printk(KERN_DEBUG "\tltab_sz: %d\n", c->ltab_sz);
  721. printk(KERN_DEBUG "\tlsave_sz: %d\n", c->lsave_sz);
  722. printk(KERN_DEBUG "\tbig_lpt: %d\n", c->big_lpt);
  723. printk(KERN_DEBUG "\tlpt_hght: %d\n", c->lpt_hght);
  724. printk(KERN_DEBUG "\tpnode_cnt: %d\n", c->pnode_cnt);
  725. printk(KERN_DEBUG "\tnnode_cnt: %d\n", c->nnode_cnt);
  726. printk(KERN_DEBUG "\tdirty_pn_cnt: %d\n", c->dirty_pn_cnt);
  727. printk(KERN_DEBUG "\tdirty_nn_cnt: %d\n", c->dirty_nn_cnt);
  728. printk(KERN_DEBUG "\tlsave_cnt: %d\n", c->lsave_cnt);
  729. printk(KERN_DEBUG "\tspace_bits: %d\n", c->space_bits);
  730. printk(KERN_DEBUG "\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
  731. printk(KERN_DEBUG "\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
  732. printk(KERN_DEBUG "\tlpt_spc_bits: %d\n", c->lpt_spc_bits);
  733. printk(KERN_DEBUG "\tpcnt_bits: %d\n", c->pcnt_bits);
  734. printk(KERN_DEBUG "\tlnum_bits: %d\n", c->lnum_bits);
  735. printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
  736. printk(KERN_DEBUG "\tLPT head is at %d:%d\n",
  737. c->nhead_lnum, c->nhead_offs);
  738. printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n",
  739. c->ltab_lnum, c->ltab_offs);
  740. if (c->big_lpt)
  741. printk(KERN_DEBUG "\tLPT lsave is at %d:%d\n",
  742. c->lsave_lnum, c->lsave_offs);
  743. for (i = 0; i < c->lpt_lebs; i++)
  744. printk(KERN_DEBUG "\tLPT LEB %d free %d dirty %d tgc %d "
  745. "cmt %d\n", i + c->lpt_first, c->ltab[i].free,
  746. c->ltab[i].dirty, c->ltab[i].tgc, c->ltab[i].cmt);
  747. spin_unlock(&dbg_lock);
  748. }
  749. void dbg_dump_leb(const struct ubifs_info *c, int lnum)
  750. {
  751. struct ubifs_scan_leb *sleb;
  752. struct ubifs_scan_node *snod;
  753. void *buf;
  754. if (dbg_failure_mode)
  755. return;
  756. printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
  757. current->pid, lnum);
  758. buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
  759. if (!buf) {
  760. ubifs_err("cannot allocate memory for dumping LEB %d", lnum);
  761. return;
  762. }
  763. sleb = ubifs_scan(c, lnum, 0, buf, 0);
  764. if (IS_ERR(sleb)) {
  765. ubifs_err("scan error %d", (int)PTR_ERR(sleb));
  766. goto out;
  767. }
  768. printk(KERN_DEBUG "LEB %d has %d nodes ending at %d\n", lnum,
  769. sleb->nodes_cnt, sleb->endpt);
  770. list_for_each_entry(snod, &sleb->nodes, list) {
  771. cond_resched();
  772. printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", lnum,
  773. snod->offs, snod->len);
  774. dbg_dump_node(c, snod->node);
  775. }
  776. printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n",
  777. current->pid, lnum);
  778. ubifs_scan_destroy(sleb);
  779. out:
  780. vfree(buf);
  781. return;
  782. }
  783. void dbg_dump_znode(const struct ubifs_info *c,
  784. const struct ubifs_znode *znode)
  785. {
  786. int n;
  787. const struct ubifs_zbranch *zbr;
  788. spin_lock(&dbg_lock);
  789. if (znode->parent)
  790. zbr = &znode->parent->zbranch[znode->iip];
  791. else
  792. zbr = &c->zroot;
  793. printk(KERN_DEBUG "znode %p, LEB %d:%d len %d parent %p iip %d level %d"
  794. " child_cnt %d flags %lx\n", znode, zbr->lnum, zbr->offs,
  795. zbr->len, znode->parent, znode->iip, znode->level,
  796. znode->child_cnt, znode->flags);
  797. if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
  798. spin_unlock(&dbg_lock);
  799. return;
  800. }
  801. printk(KERN_DEBUG "zbranches:\n");
  802. for (n = 0; n < znode->child_cnt; n++) {
  803. zbr = &znode->zbranch[n];
  804. if (znode->level > 0)
  805. printk(KERN_DEBUG "\t%d: znode %p LEB %d:%d len %d key "
  806. "%s\n", n, zbr->znode, zbr->lnum,
  807. zbr->offs, zbr->len,
  808. DBGKEY(&zbr->key));
  809. else
  810. printk(KERN_DEBUG "\t%d: LNC %p LEB %d:%d len %d key "
  811. "%s\n", n, zbr->znode, zbr->lnum,
  812. zbr->offs, zbr->len,
  813. DBGKEY(&zbr->key));
  814. }
  815. spin_unlock(&dbg_lock);
  816. }
  817. void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat)
  818. {
  819. int i;
  820. printk(KERN_DEBUG "(pid %d) start dumping heap cat %d (%d elements)\n",
  821. current->pid, cat, heap->cnt);
  822. for (i = 0; i < heap->cnt; i++) {
  823. struct ubifs_lprops *lprops = heap->arr[i];
  824. printk(KERN_DEBUG "\t%d. LEB %d hpos %d free %d dirty %d "
  825. "flags %d\n", i, lprops->lnum, lprops->hpos,
  826. lprops->free, lprops->dirty, lprops->flags);
  827. }
  828. printk(KERN_DEBUG "(pid %d) finish dumping heap\n", current->pid);
  829. }
  830. void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
  831. struct ubifs_nnode *parent, int iip)
  832. {
  833. int i;
  834. printk(KERN_DEBUG "(pid %d) dumping pnode:\n", current->pid);
  835. printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n",
  836. (size_t)pnode, (size_t)parent, (size_t)pnode->cnext);
  837. printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n",
  838. pnode->flags, iip, pnode->level, pnode->num);
  839. for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
  840. struct ubifs_lprops *lp = &pnode->lprops[i];
  841. printk(KERN_DEBUG "\t%d: free %d dirty %d flags %d lnum %d\n",
  842. i, lp->free, lp->dirty, lp->flags, lp->lnum);
  843. }
  844. }
  845. void dbg_dump_tnc(struct ubifs_info *c)
  846. {
  847. struct ubifs_znode *znode;
  848. int level;
  849. printk(KERN_DEBUG "\n");
  850. printk(KERN_DEBUG "(pid %d) start dumping TNC tree\n", current->pid);
  851. znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
  852. level = znode->level;
  853. printk(KERN_DEBUG "== Level %d ==\n", level);
  854. while (znode) {
  855. if (level != znode->level) {
  856. level = znode->level;
  857. printk(KERN_DEBUG "== Level %d ==\n", level);
  858. }
  859. dbg_dump_znode(c, znode);
  860. znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode);
  861. }
  862. printk(KERN_DEBUG "(pid %d) finish dumping TNC tree\n", current->pid);
  863. }
  864. static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode,
  865. void *priv)
  866. {
  867. dbg_dump_znode(c, znode);
  868. return 0;
  869. }
  870. /**
  871. * dbg_dump_index - dump the on-flash index.
  872. * @c: UBIFS file-system description object
  873. *
  874. * This function dumps the whole UBIFS indexing B-tree, unlike 'dbg_dump_tnc()'
  875. * which dumps only in-memory znodes and does not read znodes from flash.
  876. */
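/*
 * Note: the actual work is done by dbg_walk_index(), which reads znodes from
 * the flash media as needed and invokes the dump_znode() callback above for
 * every znode of the index.
 */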
  877. void dbg_dump_index(struct ubifs_info *c)
  878. {
  879. dbg_walk_index(c, NULL, dump_znode, NULL);
  880. }
  881. /**
  882. * dbg_save_space_info - save information about flash space.
  883. * @c: UBIFS file-system description object
  884. *
  885. * This function saves information about UBIFS free space, dirty space, etc, in
  886. * order to check it later.
  887. */
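/*
 * Note: this function is meant to be paired with 'dbg_check_space_info()'
 * below: the caller saves the space statistics before an operation which is
 * not supposed to change them (re-mounting is one example, see the comment
 * inside this function) and verifies them afterwards.
 */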
  888. void dbg_save_space_info(struct ubifs_info *c)
  889. {
  890. struct ubifs_debug_info *d = c->dbg;
  891. int freeable_cnt;
  892. spin_lock(&c->space_lock);
  893. memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats));
  894. /*
  895. * We use a dirty hack here and zero out @c->freeable_cnt, because it
  896. * affects the free space calculations, and UBIFS might not know about
  897. * all freeable eraseblocks. Indeed, we know about freeable eraseblocks
  898. * only when we read their lprops, and we do this only lazily, on
  899. * demand. So at any given point in time @c->freeable_cnt might not be
  900. * exactly accurate.
  901. *
  902. * Just one example about the issue we hit when we did not zero
  903. * @c->freeable_cnt.
  904. * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the
  905. * amount of free space in @d->saved_free
  906. * 2. We re-mount R/W, which makes UBIFS read the "lsave"
  907. * information from flash, where we cache LEBs from various
  908. * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()'
  909. * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()'
  910. * -> 'ubifs_get_pnode()' -> 'update_cats()'
  911. * -> 'ubifs_add_to_cat()').
  912. * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt
  913. * becomes %1.
  914. * 4. We calculate the amount of free space when the re-mount is
  915. * finished in 'dbg_check_space_info()' and it does not match
  916. * @d->saved_free.
  917. */
  918. freeable_cnt = c->freeable_cnt;
  919. c->freeable_cnt = 0;
  920. d->saved_free = ubifs_get_free_space_nolock(c);
  921. c->freeable_cnt = freeable_cnt;
  922. spin_unlock(&c->space_lock);
  923. }
  924. /**
  925. * dbg_check_space_info - check flash space information.
  926. * @c: UBIFS file-system description object
  927. *
  928. * This function compares current flash space information with the information
  929. * which was saved when the 'dbg_save_space_info()' function was called.
  930. * Returns zero if the information has not changed, and %-EINVAL if it has
  931. * changed.
  932. */
  933. int dbg_check_space_info(struct ubifs_info *c)
  934. {
  935. struct ubifs_debug_info *d = c->dbg;
  936. struct ubifs_lp_stats lst;
  937. long long free;
  938. int freeable_cnt;
  939. spin_lock(&c->space_lock);
  940. freeable_cnt = c->freeable_cnt;
  941. c->freeable_cnt = 0;
  942. free = ubifs_get_free_space_nolock(c);
  943. c->freeable_cnt = freeable_cnt;
  944. spin_unlock(&c->space_lock);
  945. if (free != d->saved_free) {
  946. ubifs_err("free space changed from %lld to %lld",
  947. d->saved_free, free);
  948. goto out;
  949. }
  950. return 0;
  951. out:
  952. ubifs_msg("saved lprops statistics dump");
  953. dbg_dump_lstats(&d->saved_lst);
  954. ubifs_get_lp_stats(c, &lst);
  955. ubifs_msg("current lprops statistics dump");
  956. dbg_dump_lstats(&lst);
  957. spin_lock(&c->space_lock);
  958. dbg_dump_budg(c);
  959. spin_unlock(&c->space_lock);
  960. dump_stack();
  961. return -EINVAL;
  962. }
  963. /**
  964. * dbg_check_synced_i_size - check synchronized inode size.
  965. * @inode: inode to check
  966. *
  967. * If the inode is clean, the synchronized inode size has to be equal to the
  968. * current inode size. This function has to be called only for locked inodes
  969. * (@i_mutex has to be locked). Returns %0 if the synchronized inode size is correct, and
  970. * %-EINVAL if not.
  971. */
  972. int dbg_check_synced_i_size(struct inode *inode)
  973. {
  974. int err = 0;
  975. struct ubifs_inode *ui = ubifs_inode(inode);
  976. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  977. return 0;
  978. if (!S_ISREG(inode->i_mode))
  979. return 0;
  980. mutex_lock(&ui->ui_mutex);
  981. spin_lock(&ui->ui_lock);
  982. if (ui->ui_size != ui->synced_i_size && !ui->dirty) {
  983. ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode "
  984. "is clean", ui->ui_size, ui->synced_i_size);
  985. ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
  986. inode->i_mode, i_size_read(inode));
  987. dbg_dump_stack();
  988. err = -EINVAL;
  989. }
  990. spin_unlock(&ui->ui_lock);
  991. mutex_unlock(&ui->ui_mutex);
  992. return err;
  993. }
  994. /**
  995. * dbg_check_dir_size - check directory inode size and link count.
  996. * @c: UBIFS file-system description object
  997. * @dir: the directory to check
  998. *
  999. * This function makes sure that the directory size and link count are
  1000. * correct. Returns zero in case of success and a negative error code in
  1001. * case of failure.
  1002. *
  1003. * Note, it is a good idea to make sure the @dir->i_mutex is locked before
  1004. * calling this function.
  1005. */
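/*
 * Worked example (illustrative, the names are arbitrary): a directory which
 * contains regular files "a" and "b" and a sub-directory "sub" is expected
 * to have i_size == UBIFS_INO_NODE_SZ + 2 * CALC_DENT_SIZE(1) +
 * CALC_DENT_SIZE(3) and i_nlink == 3 (the base count of 2 plus one per
 * sub-directory), which is exactly what the loop below computes and then
 * compares against the in-memory inode.
 */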
  1007. int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir)
  1008. {
  1009. unsigned int nlink = 2;
  1010. union ubifs_key key;
  1011. struct ubifs_dent_node *dent, *pdent = NULL;
  1012. struct qstr nm = { .name = NULL };
  1013. loff_t size = UBIFS_INO_NODE_SZ;
  1014. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  1015. return 0;
  1016. if (!S_ISDIR(dir->i_mode))
  1017. return 0;
  1018. lowest_dent_key(c, &key, dir->i_ino);
  1019. while (1) {
  1020. int err;
  1021. dent = ubifs_tnc_next_ent(c, &key, &nm);
  1022. if (IS_ERR(dent)) {
  1023. err = PTR_ERR(dent);
  1024. if (err == -ENOENT)
  1025. break;
  1026. return err;
  1027. }
  1028. nm.name = dent->name;
  1029. nm.len = le16_to_cpu(dent->nlen);
  1030. size += CALC_DENT_SIZE(nm.len);
  1031. if (dent->type == UBIFS_ITYPE_DIR)
  1032. nlink += 1;
  1033. kfree(pdent);
  1034. pdent = dent;
  1035. key_read(c, &dent->key, &key);
  1036. }
  1037. kfree(pdent);
  1038. if (i_size_read(dir) != size) {
  1039. ubifs_err("directory inode %lu has size %llu, "
  1040. "but calculated size is %llu", dir->i_ino,
  1041. (unsigned long long)i_size_read(dir),
  1042. (unsigned long long)size);
  1043. dump_stack();
  1044. return -EINVAL;
  1045. }
  1046. if (dir->i_nlink != nlink) {
  1047. ubifs_err("directory inode %lu has nlink %u, but calculated "
  1048. "nlink is %u", dir->i_ino, dir->i_nlink, nlink);
  1049. dump_stack();
  1050. return -EINVAL;
  1051. }
  1052. return 0;
  1053. }
  1054. /**
  1055. * dbg_check_key_order - make sure that colliding keys are properly ordered.
  1056. * @c: UBIFS file-system description object
  1057. * @zbr1: first zbranch
  1058. * @zbr2: following zbranch
  1059. *
  1060. * In the UBIFS indexing B-tree, colliding keys have to be sorted in binary
  1061. * order of the names of the direntries/xentries to which the keys refer. This
  1062. * function reads the direntries/xentries referred to by @zbr1 and @zbr2 and
  1063. * makes sure that the name of the direntry/xentry referred to by @zbr1 is less
  1064. * than that referred to by @zbr2. Returns zero if this is true, %1 if not,
  1065. * and a negative error code in case of failure.
  1066. */
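/*
 * Illustrative example (the names are arbitrary): if "foo1" and "foo2"
 * happened to hash to the same value, their direntry keys would collide,
 * and the entry whose name compares lower with memcmp() ("foo1") must come
 * first in the index. A shorter name which is a prefix of a longer one
 * sorts first, and two colliding entries with identical names are reported
 * as an error.
 */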
  1067. static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
  1068. struct ubifs_zbranch *zbr2)
  1069. {
  1070. int err, nlen1, nlen2, cmp;
  1071. struct ubifs_dent_node *dent1, *dent2;
  1072. union ubifs_key key;
  1073. ubifs_assert(!keys_cmp(c, &zbr1->key, &zbr2->key));
  1074. dent1 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
  1075. if (!dent1)
  1076. return -ENOMEM;
  1077. dent2 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
  1078. if (!dent2) {
  1079. err = -ENOMEM;
  1080. goto out_free;
  1081. }
  1082. err = ubifs_tnc_read_node(c, zbr1, dent1);
  1083. if (err)
  1084. goto out_free;
  1085. err = ubifs_validate_entry(c, dent1);
  1086. if (err)
  1087. goto out_free;
  1088. err = ubifs_tnc_read_node(c, zbr2, dent2);
  1089. if (err)
  1090. goto out_free;
  1091. err = ubifs_validate_entry(c, dent2);
  1092. if (err)
  1093. goto out_free;
  1094. /* Make sure node keys are the same as in zbranch */
  1095. err = 1;
  1096. key_read(c, &dent1->key, &key);
  1097. if (keys_cmp(c, &zbr1->key, &key)) {
  1098. dbg_err("1st entry at %d:%d has key %s", zbr1->lnum,
  1099. zbr1->offs, DBGKEY(&key));
  1100. dbg_err("but it should have key %s according to tnc",
  1101. DBGKEY(&zbr1->key));
  1102. dbg_dump_node(c, dent1);
  1103. goto out_free;
  1104. }
  1105. key_read(c, &dent2->key, &key);
  1106. if (keys_cmp(c, &zbr2->key, &key)) {
  1107. dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum,
  1108. zbr1->offs, DBGKEY(&key));
  1109. dbg_err("but it should have key %s according to tnc",
  1110. DBGKEY(&zbr2->key));
  1111. dbg_dump_node(c, dent2);
  1112. goto out_free;
  1113. }
  1114. nlen1 = le16_to_cpu(dent1->nlen);
  1115. nlen2 = le16_to_cpu(dent2->nlen);
  1116. cmp = memcmp(dent1->name, dent2->name, min_t(int, nlen1, nlen2));
  1117. if (cmp < 0 || (cmp == 0 && nlen1 < nlen2)) {
  1118. err = 0;
  1119. goto out_free;
  1120. }
  1121. if (cmp == 0 && nlen1 == nlen2)
  1122. dbg_err("2 xent/dent nodes with the same name");
  1123. else
  1124. dbg_err("bad order of colliding key %s",
  1125. DBGKEY(&key));
  1126. ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs);
  1127. dbg_dump_node(c, dent1);
  1128. ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs);
  1129. dbg_dump_node(c, dent2);
  1130. out_free:
  1131. kfree(dent2);
  1132. kfree(dent1);
  1133. return err;
  1134. }
  1135. /**
  1136. * dbg_check_znode - check if znode is all right.
  1137. * @c: UBIFS file-system description object
  1138. * @zbr: zbranch which points to this znode
  1139. *
  1140. * This function makes sure that znode referred to by @zbr is all right.
  1141. * Returns zero if it is, and %-EINVAL if it is not.
  1142. */
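/*
 * Note: the small positive numbers assigned to 'err' below (1, 2, 3, ...)
 * are only internal identifiers of the particular check that failed; they
 * are printed via ubifs_err() at the 'out' label, while the function itself
 * returns %-EINVAL to the caller.
 */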
  1143. static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr)
  1144. {
  1145. struct ubifs_znode *znode = zbr->znode;
  1146. struct ubifs_znode *zp = znode->parent;
  1147. int n, err, cmp;
  1148. if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
  1149. err = 1;
  1150. goto out;
  1151. }
  1152. if (znode->level < 0) {
  1153. err = 2;
  1154. goto out;
  1155. }
  1156. if (znode->iip < 0 || znode->iip >= c->fanout) {
  1157. err = 3;
  1158. goto out;
  1159. }
  1160. if (zbr->len == 0)
  1161. /* Only dirty zbranch may have no on-flash nodes */
  1162. if (!ubifs_zn_dirty(znode)) {
  1163. err = 4;
  1164. goto out;
  1165. }
  1166. if (ubifs_zn_dirty(znode)) {
  1167. /*
  1168. * If znode is dirty, its parent has to be dirty as well. The
  1169. * order of the operations is important, so we need memory
  1170. * barriers.
  1171. */
  1172. smp_mb();
  1173. if (zp && !ubifs_zn_dirty(zp)) {
  1174. /*
  1175. * The dirty flag is atomic and is cleared outside the
  1176. * TNC mutex, so znode's dirty flag may now have
  1177. * been cleared. The child is always cleared before the
  1178. * parent, so we just need to check again.
  1179. */
  1180. smp_mb();
  1181. if (ubifs_zn_dirty(znode)) {
  1182. err = 5;
  1183. goto out;
  1184. }
  1185. }
  1186. }
  1187. if (zp) {
  1188. const union ubifs_key *min, *max;
  1189. if (znode->level != zp->level - 1) {
  1190. err = 6;
  1191. goto out;
  1192. }
  1193. /* Make sure the 'parent' pointer in our znode is correct */
  1194. err = ubifs_search_zbranch(c, zp, &zbr->key, &n);
  1195. if (!err) {
  1196. /* This zbranch does not exist in the parent */
  1197. err = 7;
  1198. goto out;
  1199. }
  1200. if (znode->iip >= zp->child_cnt) {
  1201. err = 8;
  1202. goto out;
  1203. }
  1204. if (znode->iip != n) {
  1205. /* This may happen only in case of collisions */
  1206. if (keys_cmp(c, &zp->zbranch[n].key,
  1207. &zp->zbranch[znode->iip].key)) {
  1208. err = 9;
  1209. goto out;
  1210. }
  1211. n = znode->iip;
  1212. }
  1213. /*
  1214. * Make sure that the first key in our znode is greater than or
  1215. * equal to the key in the pointing zbranch.
  1216. */
  1217. min = &zbr->key;
  1218. cmp = keys_cmp(c, min, &znode->zbranch[0].key);
  1219. if (cmp == 1) {
  1220. err = 10;
  1221. goto out;
  1222. }
  1223. if (n + 1 < zp->child_cnt) {
  1224. max = &zp->zbranch[n + 1].key;
  1225. /*
  1226. * Make sure the last key in our znode is less than or
  1227. * equal to the key in the zbranch which goes after our
  1228. * pointing zbranch.
  1229. */
  1230. cmp = keys_cmp(c, max,
  1231. &znode->zbranch[znode->child_cnt - 1].key);
  1232. if (cmp == -1) {
  1233. err = 11;
  1234. goto out;
  1235. }
  1236. }
  1237. } else {
  1238. /* This may only be root znode */
  1239. if (zbr != &c->zroot) {
  1240. err = 12;
  1241. goto out;
  1242. }
  1243. }
  1244. /*
  1245. * Make sure that each next key is greater than or equal to the
  1246. * previous one.
  1247. */
  1248. for (n = 1; n < znode->child_cnt; n++) {
  1249. cmp = keys_cmp(c, &znode->zbranch[n - 1].key,
  1250. &znode->zbranch[n].key);
  1251. if (cmp > 0) {
  1252. err = 13;
  1253. goto out;
  1254. }
  1255. if (cmp == 0) {
  1256. /* This can only be keys with colliding hash */
  1257. if (!is_hash_key(c, &znode->zbranch[n].key)) {
  1258. err = 14;
  1259. goto out;
  1260. }
  1261. if (znode->level != 0 || c->replaying)
  1262. continue;
  1263. /*
  1264. * Colliding keys should follow binary order of
  1265. * corresponding xentry/dentry names.
  1266. */
  1267. err = dbg_check_key_order(c, &znode->zbranch[n - 1],
  1268. &znode->zbranch[n]);
  1269. if (err < 0)
  1270. return err;
  1271. if (err) {
  1272. err = 15;
  1273. goto out;
  1274. }
  1275. }
  1276. }
  1277. for (n = 0; n < znode->child_cnt; n++) {
  1278. if (!znode->zbranch[n].znode &&
  1279. (znode->zbranch[n].lnum == 0 ||
  1280. znode->zbranch[n].len == 0)) {
  1281. err = 16;
  1282. goto out;
  1283. }
  1284. if (znode->zbranch[n].lnum != 0 &&
  1285. znode->zbranch[n].len == 0) {
  1286. err = 17;
  1287. goto out;
  1288. }
  1289. if (znode->zbranch[n].lnum == 0 &&
  1290. znode->zbranch[n].len != 0) {
  1291. err = 18;
  1292. goto out;
  1293. }
  1294. if (znode->zbranch[n].lnum == 0 &&
  1295. znode->zbranch[n].offs != 0) {
  1296. err = 19;
  1297. goto out;
  1298. }
  1299. if (znode->level != 0 && znode->zbranch[n].znode)
  1300. if (znode->zbranch[n].znode->parent != znode) {
  1301. err = 20;
  1302. goto out;
  1303. }
  1304. }
  1305. return 0;
  1306. out:
  1307. ubifs_err("failed, error %d", err);
  1308. ubifs_msg("dump of the znode");
  1309. dbg_dump_znode(c, znode);
  1310. if (zp) {
  1311. ubifs_msg("dump of the parent znode");
  1312. dbg_dump_znode(c, zp);
  1313. }
  1314. dump_stack();
  1315. return -EINVAL;
  1316. }
  1317. /**
  1318. * dbg_check_tnc - check TNC tree.
  1319. * @c: UBIFS file-system description object
  1320. * @extra: do extra checks that are possible at start commit
  1321. *
 * This function traverses the whole TNC tree and checks every znode. Returns
 * zero if everything is all right and %-EINVAL if something is wrong with TNC.
  1324. */
  1325. int dbg_check_tnc(struct ubifs_info *c, int extra)
  1326. {
  1327. struct ubifs_znode *znode;
  1328. long clean_cnt = 0, dirty_cnt = 0;
  1329. int err, last;
  1330. if (!(ubifs_chk_flags & UBIFS_CHK_TNC))
  1331. return 0;
  1332. ubifs_assert(mutex_is_locked(&c->tnc_mutex));
  1333. if (!c->zroot.znode)
  1334. return 0;
  1335. znode = ubifs_tnc_postorder_first(c->zroot.znode);
  1336. while (1) {
  1337. struct ubifs_znode *prev;
  1338. struct ubifs_zbranch *zbr;
  1339. if (!znode->parent)
  1340. zbr = &c->zroot;
  1341. else
  1342. zbr = &znode->parent->zbranch[znode->iip];
  1343. err = dbg_check_znode(c, zbr);
  1344. if (err)
  1345. return err;
  1346. if (extra) {
  1347. if (ubifs_zn_dirty(znode))
  1348. dirty_cnt += 1;
  1349. else
  1350. clean_cnt += 1;
  1351. }
  1352. prev = znode;
  1353. znode = ubifs_tnc_postorder_next(znode);
  1354. if (!znode)
  1355. break;
  1356. /*
  1357. * If the last key of this znode is equivalent to the first key
  1358. * of the next znode (collision), then check order of the keys.
  1359. */
  1360. last = prev->child_cnt - 1;
  1361. if (prev->level == 0 && znode->level == 0 && !c->replaying &&
  1362. !keys_cmp(c, &prev->zbranch[last].key,
  1363. &znode->zbranch[0].key)) {
  1364. err = dbg_check_key_order(c, &prev->zbranch[last],
  1365. &znode->zbranch[0]);
  1366. if (err < 0)
  1367. return err;
  1368. if (err) {
  1369. ubifs_msg("first znode");
  1370. dbg_dump_znode(c, prev);
  1371. ubifs_msg("second znode");
  1372. dbg_dump_znode(c, znode);
  1373. return -EINVAL;
  1374. }
  1375. }
  1376. }
  1377. if (extra) {
  1378. if (clean_cnt != atomic_long_read(&c->clean_zn_cnt)) {
  1379. ubifs_err("incorrect clean_zn_cnt %ld, calculated %ld",
  1380. atomic_long_read(&c->clean_zn_cnt),
  1381. clean_cnt);
  1382. return -EINVAL;
  1383. }
  1384. if (dirty_cnt != atomic_long_read(&c->dirty_zn_cnt)) {
  1385. ubifs_err("incorrect dirty_zn_cnt %ld, calculated %ld",
  1386. atomic_long_read(&c->dirty_zn_cnt),
  1387. dirty_cnt);
  1388. return -EINVAL;
  1389. }
  1390. }
  1391. return 0;
  1392. }
  1393. /**
  1394. * dbg_walk_index - walk the on-flash index.
  1395. * @c: UBIFS file-system description object
  1396. * @leaf_cb: called for each leaf node
  1397. * @znode_cb: called for each indexing node
  1398. * @priv: private data which is passed to callbacks
  1399. *
  1400. * This function walks the UBIFS index and calls the @leaf_cb for each leaf
  1401. * node and @znode_cb for each indexing node. Returns zero in case of success
  1402. * and a negative error code in case of failure.
  1403. *
 * It would be better if this function removed every znode it pulled into the
 * TNC, so that the behavior more closely matched the non-debugging behavior.
  1407. */
  1408. int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
  1409. dbg_znode_callback znode_cb, void *priv)
  1410. {
  1411. int err;
  1412. struct ubifs_zbranch *zbr;
  1413. struct ubifs_znode *znode, *child;
  1414. mutex_lock(&c->tnc_mutex);
  1415. /* If the root indexing node is not in TNC - pull it */
  1416. if (!c->zroot.znode) {
  1417. c->zroot.znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
  1418. if (IS_ERR(c->zroot.znode)) {
  1419. err = PTR_ERR(c->zroot.znode);
  1420. c->zroot.znode = NULL;
  1421. goto out_unlock;
  1422. }
  1423. }
  1424. /*
  1425. * We are going to traverse the indexing tree in the postorder manner.
  1426. * Go down and find the leftmost indexing node where we are going to
  1427. * start from.
  1428. */
  1429. znode = c->zroot.znode;
  1430. while (znode->level > 0) {
  1431. zbr = &znode->zbranch[0];
  1432. child = zbr->znode;
  1433. if (!child) {
  1434. child = ubifs_load_znode(c, zbr, znode, 0);
  1435. if (IS_ERR(child)) {
  1436. err = PTR_ERR(child);
  1437. goto out_unlock;
  1438. }
  1439. zbr->znode = child;
  1440. }
  1441. znode = child;
  1442. }
  1443. /* Iterate over all indexing nodes */
  1444. while (1) {
  1445. int idx;
  1446. cond_resched();
  1447. if (znode_cb) {
  1448. err = znode_cb(c, znode, priv);
  1449. if (err) {
  1450. ubifs_err("znode checking function returned "
  1451. "error %d", err);
  1452. dbg_dump_znode(c, znode);
  1453. goto out_dump;
  1454. }
  1455. }
  1456. if (leaf_cb && znode->level == 0) {
  1457. for (idx = 0; idx < znode->child_cnt; idx++) {
  1458. zbr = &znode->zbranch[idx];
  1459. err = leaf_cb(c, zbr, priv);
  1460. if (err) {
  1461. ubifs_err("leaf checking function "
  1462. "returned error %d, for leaf "
  1463. "at LEB %d:%d",
  1464. err, zbr->lnum, zbr->offs);
  1465. goto out_dump;
  1466. }
  1467. }
  1468. }
  1469. if (!znode->parent)
  1470. break;
  1471. idx = znode->iip + 1;
  1472. znode = znode->parent;
  1473. if (idx < znode->child_cnt) {
  1474. /* Switch to the next index in the parent */
  1475. zbr = &znode->zbranch[idx];
  1476. child = zbr->znode;
  1477. if (!child) {
  1478. child = ubifs_load_znode(c, zbr, znode, idx);
  1479. if (IS_ERR(child)) {
  1480. err = PTR_ERR(child);
  1481. goto out_unlock;
  1482. }
  1483. zbr->znode = child;
  1484. }
  1485. znode = child;
  1486. } else
  1487. /*
  1488. * This is the last child, switch to the parent and
  1489. * continue.
  1490. */
  1491. continue;
  1492. /* Go to the lowest leftmost znode in the new sub-tree */
  1493. while (znode->level > 0) {
  1494. zbr = &znode->zbranch[0];
  1495. child = zbr->znode;
  1496. if (!child) {
  1497. child = ubifs_load_znode(c, zbr, znode, 0);
  1498. if (IS_ERR(child)) {
  1499. err = PTR_ERR(child);
  1500. goto out_unlock;
  1501. }
  1502. zbr->znode = child;
  1503. }
  1504. znode = child;
  1505. }
  1506. }
  1507. mutex_unlock(&c->tnc_mutex);
  1508. return 0;
  1509. out_dump:
  1510. if (znode->parent)
  1511. zbr = &znode->parent->zbranch[znode->iip];
  1512. else
  1513. zbr = &c->zroot;
  1514. ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
  1515. dbg_dump_znode(c, znode);
  1516. out_unlock:
  1517. mutex_unlock(&c->tnc_mutex);
  1518. return err;
  1519. }
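/*
 * For example, 'dbg_check_idx_size()' below walks the index with no leaf
 * callback and uses 'add_size()' as the znode callback to accumulate the
 * total index size in a 'long long' passed via @priv:
 *
 *	long long calc = 0;
 *	err = dbg_walk_index(c, NULL, add_size, &calc);
 */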
  1520. /**
  1521. * add_size - add znode size to partially calculated index size.
  1522. * @c: UBIFS file-system description object
  1523. * @znode: znode to add size for
  1524. * @priv: partially calculated index size
  1525. *
  1526. * This is a helper function for 'dbg_check_idx_size()' which is called for
  1527. * every indexing node and adds its size to the 'long long' variable pointed to
  1528. * by @priv.
  1529. */
  1530. static int add_size(struct ubifs_info *c, struct ubifs_znode *znode, void *priv)
  1531. {
  1532. long long *idx_size = priv;
  1533. int add;
  1534. add = ubifs_idx_node_sz(c, znode->child_cnt);
  1535. add = ALIGN(add, 8);
  1536. *idx_size += add;
  1537. return 0;
  1538. }
  1539. /**
  1540. * dbg_check_idx_size - check index size.
  1541. * @c: UBIFS file-system description object
  1542. * @idx_size: size to check
  1543. *
  1544. * This function walks the UBIFS index, calculates its size and checks that the
 * size is equal to @idx_size. Returns zero in case of success and a
  1546. * negative error code in case of failure.
  1547. */
  1548. int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
  1549. {
  1550. int err;
  1551. long long calc = 0;
  1552. if (!(ubifs_chk_flags & UBIFS_CHK_IDX_SZ))
  1553. return 0;
  1554. err = dbg_walk_index(c, NULL, add_size, &calc);
  1555. if (err) {
  1556. ubifs_err("error %d while walking the index", err);
  1557. return err;
  1558. }
  1559. if (calc != idx_size) {
  1560. ubifs_err("index size check failed: calculated size is %lld, "
  1561. "should be %lld", calc, idx_size);
  1562. dump_stack();
  1563. return -EINVAL;
  1564. }
  1565. return 0;
  1566. }
  1567. /**
  1568. * struct fsck_inode - information about an inode used when checking the file-system.
  1569. * @rb: link in the RB-tree of inodes
  1570. * @inum: inode number
  1571. * @mode: inode type, permissions, etc
  1572. * @nlink: inode link count
  1573. * @xattr_cnt: count of extended attributes
 * @references: how many directory/xattr entries refer to this inode
 *              (calculated while walking the index)
 * @calc_cnt: for directories, the calculated link count (2 plus the number of
 *            child directories)
 * @size: inode size (read from on-flash inode)
 * @xattr_sz: summary size of all extended attributes (read from the on-flash
 *            inode)
 * @calc_sz: for directories, the calculated directory size
 * @calc_xcnt: calculated count of extended attributes
 * @calc_xsz: calculated summary size of all extended attributes
  1583. * @xattr_nms: sum of lengths of all extended attribute names belonging to this
  1584. * inode (read from on-flash inode)
  1585. * @calc_xnms: calculated sum of lengths of all extended attribute names
  1586. */
  1587. struct fsck_inode {
  1588. struct rb_node rb;
  1589. ino_t inum;
  1590. umode_t mode;
  1591. unsigned int nlink;
  1592. unsigned int xattr_cnt;
  1593. int references;
  1594. int calc_cnt;
  1595. long long size;
  1596. unsigned int xattr_sz;
  1597. long long calc_sz;
  1598. long long calc_xcnt;
  1599. long long calc_xsz;
  1600. unsigned int xattr_nms;
  1601. long long calc_xnms;
  1602. };
  1603. /**
  1604. * struct fsck_data - private FS checking information.
  1605. * @inodes: RB-tree of all inodes (contains @struct fsck_inode objects)
  1606. */
  1607. struct fsck_data {
  1608. struct rb_root inodes;
  1609. };
  1610. /**
  1611. * add_inode - add inode information to RB-tree of inodes.
  1612. * @c: UBIFS file-system description object
  1613. * @fsckd: FS checking information
  1614. * @ino: raw UBIFS inode to add
  1615. *
  1616. * This is a helper function for 'check_leaf()' which adds information about
 * inode @ino to the RB-tree of inodes. Returns a pointer to the inode
 * information in case of success and an error pointer in case of failure.
  1619. */
  1620. static struct fsck_inode *add_inode(struct ubifs_info *c,
  1621. struct fsck_data *fsckd,
  1622. struct ubifs_ino_node *ino)
  1623. {
  1624. struct rb_node **p, *parent = NULL;
  1625. struct fsck_inode *fscki;
  1626. ino_t inum = key_inum_flash(c, &ino->key);
  1627. p = &fsckd->inodes.rb_node;
  1628. while (*p) {
  1629. parent = *p;
  1630. fscki = rb_entry(parent, struct fsck_inode, rb);
  1631. if (inum < fscki->inum)
  1632. p = &(*p)->rb_left;
  1633. else if (inum > fscki->inum)
  1634. p = &(*p)->rb_right;
  1635. else
  1636. return fscki;
  1637. }
  1638. if (inum > c->highest_inum) {
  1639. ubifs_err("too high inode number, max. is %lu",
  1640. (unsigned long)c->highest_inum);
  1641. return ERR_PTR(-EINVAL);
  1642. }
  1643. fscki = kzalloc(sizeof(struct fsck_inode), GFP_NOFS);
  1644. if (!fscki)
  1645. return ERR_PTR(-ENOMEM);
  1646. fscki->inum = inum;
  1647. fscki->nlink = le32_to_cpu(ino->nlink);
  1648. fscki->size = le64_to_cpu(ino->size);
  1649. fscki->xattr_cnt = le32_to_cpu(ino->xattr_cnt);
  1650. fscki->xattr_sz = le32_to_cpu(ino->xattr_size);
  1651. fscki->xattr_nms = le32_to_cpu(ino->xattr_names);
  1652. fscki->mode = le32_to_cpu(ino->mode);
  1653. if (S_ISDIR(fscki->mode)) {
  1654. fscki->calc_sz = UBIFS_INO_NODE_SZ;
  1655. fscki->calc_cnt = 2;
  1656. }
  1657. rb_link_node(&fscki->rb, parent, p);
  1658. rb_insert_color(&fscki->rb, &fsckd->inodes);
  1659. return fscki;
  1660. }
  1661. /**
  1662. * search_inode - search inode in the RB-tree of inodes.
  1663. * @fsckd: FS checking information
  1664. * @inum: inode number to search
  1665. *
  1666. * This is a helper function for 'check_leaf()' which searches inode @inum in
  1667. * the RB-tree of inodes and returns an inode information pointer or %NULL if
  1668. * the inode was not found.
  1669. */
  1670. static struct fsck_inode *search_inode(struct fsck_data *fsckd, ino_t inum)
  1671. {
  1672. struct rb_node *p;
  1673. struct fsck_inode *fscki;
  1674. p = fsckd->inodes.rb_node;
  1675. while (p) {
  1676. fscki = rb_entry(p, struct fsck_inode, rb);
  1677. if (inum < fscki->inum)
  1678. p = p->rb_left;
  1679. else if (inum > fscki->inum)
  1680. p = p->rb_right;
  1681. else
  1682. return fscki;
  1683. }
  1684. return NULL;
  1685. }
  1686. /**
  1687. * read_add_inode - read inode node and add it to RB-tree of inodes.
  1688. * @c: UBIFS file-system description object
  1689. * @fsckd: FS checking information
  1690. * @inum: inode number to read
  1691. *
  1692. * This is a helper function for 'check_leaf()' which finds inode node @inum in
 * the index, reads it, and adds it to the RB-tree of inodes. Returns a pointer
 * to the inode information in case of success and an error pointer in case of
 * failure.
  1696. */
  1697. static struct fsck_inode *read_add_inode(struct ubifs_info *c,
  1698. struct fsck_data *fsckd, ino_t inum)
  1699. {
  1700. int n, err;
  1701. union ubifs_key key;
  1702. struct ubifs_znode *znode;
  1703. struct ubifs_zbranch *zbr;
  1704. struct ubifs_ino_node *ino;
  1705. struct fsck_inode *fscki;
  1706. fscki = search_inode(fsckd, inum);
  1707. if (fscki)
  1708. return fscki;
  1709. ino_key_init(c, &key, inum);
  1710. err = ubifs_lookup_level0(c, &key, &znode, &n);
  1711. if (!err) {
  1712. ubifs_err("inode %lu not found in index", (unsigned long)inum);
  1713. return ERR_PTR(-ENOENT);
  1714. } else if (err < 0) {
  1715. ubifs_err("error %d while looking up inode %lu",
  1716. err, (unsigned long)inum);
  1717. return ERR_PTR(err);
  1718. }
  1719. zbr = &znode->zbranch[n];
  1720. if (zbr->len < UBIFS_INO_NODE_SZ) {
  1721. ubifs_err("bad node %lu node length %d",
  1722. (unsigned long)inum, zbr->len);
  1723. return ERR_PTR(-EINVAL);
  1724. }
  1725. ino = kmalloc(zbr->len, GFP_NOFS);
  1726. if (!ino)
  1727. return ERR_PTR(-ENOMEM);
  1728. err = ubifs_tnc_read_node(c, zbr, ino);
  1729. if (err) {
  1730. ubifs_err("cannot read inode node at LEB %d:%d, error %d",
  1731. zbr->lnum, zbr->offs, err);
  1732. kfree(ino);
  1733. return ERR_PTR(err);
  1734. }
  1735. fscki = add_inode(c, fsckd, ino);
  1736. kfree(ino);
  1737. if (IS_ERR(fscki)) {
  1738. ubifs_err("error %ld while adding inode %lu node",
  1739. PTR_ERR(fscki), (unsigned long)inum);
  1740. return fscki;
  1741. }
  1742. return fscki;
  1743. }
  1744. /**
  1745. * check_leaf - check leaf node.
  1746. * @c: UBIFS file-system description object
  1747. * @zbr: zbranch of the leaf node to check
  1748. * @priv: FS checking information
  1749. *
  1750. * This is a helper function for 'dbg_check_filesystem()' which is called for
  1751. * every single leaf node while walking the indexing tree. It checks that the
 * leaf node referred to by the indexing tree exists, has a correct CRC, and does
  1753. * some other basic validation. This function is also responsible for building
  1754. * an RB-tree of inodes - it adds all inodes into the RB-tree. It also
  1755. * calculates reference count, size, etc for each inode in order to later
  1756. * compare them to the information stored inside the inodes and detect possible
  1757. * inconsistencies. Returns zero in case of success and a negative error code
  1758. * in case of failure.
  1759. */
  1760. static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
  1761. void *priv)
  1762. {
  1763. ino_t inum;
  1764. void *node;
  1765. struct ubifs_ch *ch;
  1766. int err, type = key_type(c, &zbr->key);
  1767. struct fsck_inode *fscki;
  1768. if (zbr->len < UBIFS_CH_SZ) {
  1769. ubifs_err("bad leaf length %d (LEB %d:%d)",
  1770. zbr->len, zbr->lnum, zbr->offs);
  1771. return -EINVAL;
  1772. }
  1773. node = kmalloc(zbr->len, GFP_NOFS);
  1774. if (!node)
  1775. return -ENOMEM;
  1776. err = ubifs_tnc_read_node(c, zbr, node);
  1777. if (err) {
  1778. ubifs_err("cannot read leaf node at LEB %d:%d, error %d",
  1779. zbr->lnum, zbr->offs, err);
  1780. goto out_free;
  1781. }
  1782. /* If this is an inode node, add it to RB-tree of inodes */
  1783. if (type == UBIFS_INO_KEY) {
  1784. fscki = add_inode(c, priv, node);
  1785. if (IS_ERR(fscki)) {
  1786. err = PTR_ERR(fscki);
  1787. ubifs_err("error %d while adding inode node", err);
  1788. goto out_dump;
  1789. }
  1790. goto out;
  1791. }
  1792. if (type != UBIFS_DENT_KEY && type != UBIFS_XENT_KEY &&
  1793. type != UBIFS_DATA_KEY) {
  1794. ubifs_err("unexpected node type %d at LEB %d:%d",
  1795. type, zbr->lnum, zbr->offs);
  1796. err = -EINVAL;
  1797. goto out_free;
  1798. }
  1799. ch = node;
  1800. if (le64_to_cpu(ch->sqnum) > c->max_sqnum) {
  1801. ubifs_err("too high sequence number, max. is %llu",
  1802. c->max_sqnum);
  1803. err = -EINVAL;
  1804. goto out_dump;
  1805. }
  1806. if (type == UBIFS_DATA_KEY) {
  1807. long long blk_offs;
  1808. struct ubifs_data_node *dn = node;
		/*
		 * Search the inode node this data node belongs to and insert
		 * it into the RB-tree of inodes.
		 */
  1813. inum = key_inum_flash(c, &dn->key);
  1814. fscki = read_add_inode(c, priv, inum);
  1815. if (IS_ERR(fscki)) {
  1816. err = PTR_ERR(fscki);
  1817. ubifs_err("error %d while processing data node and "
  1818. "trying to find inode node %lu",
  1819. err, (unsigned long)inum);
  1820. goto out_dump;
  1821. }
  1822. /* Make sure the data node is within inode size */
  1823. blk_offs = key_block_flash(c, &dn->key);
  1824. blk_offs <<= UBIFS_BLOCK_SHIFT;
  1825. blk_offs += le32_to_cpu(dn->size);
  1826. if (blk_offs > fscki->size) {
  1827. ubifs_err("data node at LEB %d:%d is not within inode "
  1828. "size %lld", zbr->lnum, zbr->offs,
  1829. fscki->size);
  1830. err = -EINVAL;
  1831. goto out_dump;
  1832. }
  1833. } else {
  1834. int nlen;
  1835. struct ubifs_dent_node *dent = node;
  1836. struct fsck_inode *fscki1;
  1837. err = ubifs_validate_entry(c, dent);
  1838. if (err)
  1839. goto out_dump;
		/*
		 * Search the inode node this entry refers to and the parent
		 * inode node, and insert them into the RB-tree of inodes.
		 */
  1844. inum = le64_to_cpu(dent->inum);
  1845. fscki = read_add_inode(c, priv, inum);
  1846. if (IS_ERR(fscki)) {
  1847. err = PTR_ERR(fscki);
  1848. ubifs_err("error %d while processing entry node and "
  1849. "trying to find inode node %lu",
  1850. err, (unsigned long)inum);
  1851. goto out_dump;
  1852. }
		/* Count how many direntries or xentries refer to this inode */
  1854. fscki->references += 1;
  1855. inum = key_inum_flash(c, &dent->key);
  1856. fscki1 = read_add_inode(c, priv, inum);
  1857. if (IS_ERR(fscki1)) {
  1858. err = PTR_ERR(fscki1);
  1859. ubifs_err("error %d while processing entry node and "
  1860. "trying to find parent inode node %lu",
  1861. err, (unsigned long)inum);
  1862. goto out_dump;
  1863. }
  1864. nlen = le16_to_cpu(dent->nlen);
  1865. if (type == UBIFS_XENT_KEY) {
  1866. fscki1->calc_xcnt += 1;
  1867. fscki1->calc_xsz += CALC_DENT_SIZE(nlen);
  1868. fscki1->calc_xsz += CALC_XATTR_BYTES(fscki->size);
  1869. fscki1->calc_xnms += nlen;
  1870. } else {
  1871. fscki1->calc_sz += CALC_DENT_SIZE(nlen);
  1872. if (dent->type == UBIFS_ITYPE_DIR)
  1873. fscki1->calc_cnt += 1;
  1874. }
  1875. }
  1876. out:
  1877. kfree(node);
  1878. return 0;
  1879. out_dump:
  1880. ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
  1881. dbg_dump_node(c, node);
  1882. out_free:
  1883. kfree(node);
  1884. return err;
  1885. }
  1886. /**
  1887. * free_inodes - free RB-tree of inodes.
  1888. * @fsckd: FS checking information
  1889. */
  1890. static void free_inodes(struct fsck_data *fsckd)
  1891. {
  1892. struct rb_node *this = fsckd->inodes.rb_node;
  1893. struct fsck_inode *fscki;
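	/*
	 * Destroy the tree bottom-up: descend to a node with no children,
	 * free it, clear the pointer to it in its parent, and continue from
	 * the parent. This avoids the re-balancing work 'rb_erase()' would do
	 * for every node.
	 */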
  1894. while (this) {
  1895. if (this->rb_left)
  1896. this = this->rb_left;
  1897. else if (this->rb_right)
  1898. this = this->rb_right;
  1899. else {
  1900. fscki = rb_entry(this, struct fsck_inode, rb);
  1901. this = rb_parent(this);
  1902. if (this) {
  1903. if (this->rb_left == &fscki->rb)
  1904. this->rb_left = NULL;
  1905. else
  1906. this->rb_right = NULL;
  1907. }
  1908. kfree(fscki);
  1909. }
  1910. }
  1911. }
  1912. /**
  1913. * check_inodes - checks all inodes.
  1914. * @c: UBIFS file-system description object
  1915. * @fsckd: FS checking information
  1916. *
  1917. * This is a helper function for 'dbg_check_filesystem()' which walks the
  1918. * RB-tree of inodes after the index scan has been finished, and checks that
  1919. * inode nlink, size, etc are correct. Returns zero if inodes are fine,
  1920. * %-EINVAL if not, and a negative error code in case of failure.
  1921. */
  1922. static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
  1923. {
  1924. int n, err;
  1925. union ubifs_key key;
  1926. struct ubifs_znode *znode;
  1927. struct ubifs_zbranch *zbr;
  1928. struct ubifs_ino_node *ino;
  1929. struct fsck_inode *fscki;
  1930. struct rb_node *this = rb_first(&fsckd->inodes);
  1931. while (this) {
  1932. fscki = rb_entry(this, struct fsck_inode, rb);
  1933. this = rb_next(this);
  1934. if (S_ISDIR(fscki->mode)) {
			/*
			 * Directories have to have exactly one reference (they
			 * cannot have hard links), although the root inode is
			 * an exception.
			 */
  1940. if (fscki->inum != UBIFS_ROOT_INO &&
  1941. fscki->references != 1) {
  1942. ubifs_err("directory inode %lu has %d "
  1943. "direntries which refer it, but "
  1944. "should be 1",
  1945. (unsigned long)fscki->inum,
  1946. fscki->references);
  1947. goto out_dump;
  1948. }
  1949. if (fscki->inum == UBIFS_ROOT_INO &&
  1950. fscki->references != 0) {
  1951. ubifs_err("root inode %lu has non-zero (%d) "
  1952. "direntries which refer it",
  1953. (unsigned long)fscki->inum,
  1954. fscki->references);
  1955. goto out_dump;
  1956. }
  1957. if (fscki->calc_sz != fscki->size) {
  1958. ubifs_err("directory inode %lu size is %lld, "
  1959. "but calculated size is %lld",
  1960. (unsigned long)fscki->inum,
  1961. fscki->size, fscki->calc_sz);
  1962. goto out_dump;
  1963. }
  1964. if (fscki->calc_cnt != fscki->nlink) {
  1965. ubifs_err("directory inode %lu nlink is %d, "
  1966. "but calculated nlink is %d",
  1967. (unsigned long)fscki->inum,
  1968. fscki->nlink, fscki->calc_cnt);
  1969. goto out_dump;
  1970. }
  1971. } else {
  1972. if (fscki->references != fscki->nlink) {
  1973. ubifs_err("inode %lu nlink is %d, but "
  1974. "calculated nlink is %d",
  1975. (unsigned long)fscki->inum,
  1976. fscki->nlink, fscki->references);
  1977. goto out_dump;
  1978. }
  1979. }
  1980. if (fscki->xattr_sz != fscki->calc_xsz) {
  1981. ubifs_err("inode %lu has xattr size %u, but "
  1982. "calculated size is %lld",
  1983. (unsigned long)fscki->inum, fscki->xattr_sz,
  1984. fscki->calc_xsz);
  1985. goto out_dump;
  1986. }
  1987. if (fscki->xattr_cnt != fscki->calc_xcnt) {
  1988. ubifs_err("inode %lu has %u xattrs, but "
  1989. "calculated count is %lld",
  1990. (unsigned long)fscki->inum,
  1991. fscki->xattr_cnt, fscki->calc_xcnt);
  1992. goto out_dump;
  1993. }
  1994. if (fscki->xattr_nms != fscki->calc_xnms) {
  1995. ubifs_err("inode %lu has xattr names' size %u, but "
  1996. "calculated names' size is %lld",
  1997. (unsigned long)fscki->inum, fscki->xattr_nms,
  1998. fscki->calc_xnms);
  1999. goto out_dump;
  2000. }
  2001. }
  2002. return 0;
  2003. out_dump:
  2004. /* Read the bad inode and dump it */
  2005. ino_key_init(c, &key, fscki->inum);
  2006. err = ubifs_lookup_level0(c, &key, &znode, &n);
  2007. if (!err) {
  2008. ubifs_err("inode %lu not found in index",
  2009. (unsigned long)fscki->inum);
  2010. return -ENOENT;
  2011. } else if (err < 0) {
  2012. ubifs_err("error %d while looking up inode %lu",
  2013. err, (unsigned long)fscki->inum);
  2014. return err;
  2015. }
  2016. zbr = &znode->zbranch[n];
  2017. ino = kmalloc(zbr->len, GFP_NOFS);
  2018. if (!ino)
  2019. return -ENOMEM;
  2020. err = ubifs_tnc_read_node(c, zbr, ino);
  2021. if (err) {
  2022. ubifs_err("cannot read inode node at LEB %d:%d, error %d",
  2023. zbr->lnum, zbr->offs, err);
  2024. kfree(ino);
  2025. return err;
  2026. }
  2027. ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
  2028. (unsigned long)fscki->inum, zbr->lnum, zbr->offs);
  2029. dbg_dump_node(c, ino);
  2030. kfree(ino);
  2031. return -EINVAL;
  2032. }
  2033. /**
  2034. * dbg_check_filesystem - check the file-system.
  2035. * @c: UBIFS file-system description object
  2036. *
  2037. * This function checks the file system, namely:
  2038. * o makes sure that all leaf nodes exist and their CRCs are correct;
  2039. * o makes sure inode nlink, size, xattr size/count are correct (for all
  2040. * inodes).
  2041. *
 * The function reads the whole indexing tree and all nodes, so it is pretty
  2043. * heavy-weight. Returns zero if the file-system is consistent, %-EINVAL if
  2044. * not, and a negative error code in case of failure.
  2045. */
  2046. int dbg_check_filesystem(struct ubifs_info *c)
  2047. {
  2048. int err;
  2049. struct fsck_data fsckd;
  2050. if (!(ubifs_chk_flags & UBIFS_CHK_FS))
  2051. return 0;
  2052. fsckd.inodes = RB_ROOT;
  2053. err = dbg_walk_index(c, check_leaf, NULL, &fsckd);
  2054. if (err)
  2055. goto out_free;
  2056. err = check_inodes(c, &fsckd);
  2057. if (err)
  2058. goto out_free;
  2059. free_inodes(&fsckd);
  2060. return 0;
  2061. out_free:
  2062. ubifs_err("file-system check failed with error %d", err);
  2063. dump_stack();
  2064. free_inodes(&fsckd);
  2065. return err;
  2066. }
  2067. /**
 * dbg_check_data_nodes_order - check that the list of data nodes is sorted.
  2069. * @c: UBIFS file-system description object
  2070. * @head: the list of nodes ('struct ubifs_scan_node' objects)
  2071. *
  2072. * This function returns zero if the list of data nodes is sorted correctly,
  2073. * and %-EINVAL if not.
  2074. */
  2075. int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
  2076. {
  2077. struct list_head *cur;
  2078. struct ubifs_scan_node *sa, *sb;
  2079. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  2080. return 0;
  2081. for (cur = head->next; cur->next != head; cur = cur->next) {
  2082. ino_t inuma, inumb;
  2083. uint32_t blka, blkb;
  2084. cond_resched();
  2085. sa = container_of(cur, struct ubifs_scan_node, list);
  2086. sb = container_of(cur->next, struct ubifs_scan_node, list);
  2087. if (sa->type != UBIFS_DATA_NODE) {
  2088. ubifs_err("bad node type %d", sa->type);
  2089. dbg_dump_node(c, sa->node);
  2090. return -EINVAL;
  2091. }
  2092. if (sb->type != UBIFS_DATA_NODE) {
  2093. ubifs_err("bad node type %d", sb->type);
  2094. dbg_dump_node(c, sb->node);
  2095. return -EINVAL;
  2096. }
  2097. inuma = key_inum(c, &sa->key);
  2098. inumb = key_inum(c, &sb->key);
  2099. if (inuma < inumb)
  2100. continue;
  2101. if (inuma > inumb) {
  2102. ubifs_err("larger inum %lu goes before inum %lu",
  2103. (unsigned long)inuma, (unsigned long)inumb);
  2104. goto error_dump;
  2105. }
  2106. blka = key_block(c, &sa->key);
  2107. blkb = key_block(c, &sb->key);
  2108. if (blka > blkb) {
  2109. ubifs_err("larger block %u goes before %u", blka, blkb);
  2110. goto error_dump;
  2111. }
  2112. if (blka == blkb) {
  2113. ubifs_err("two data nodes for the same block");
  2114. goto error_dump;
  2115. }
  2116. }
  2117. return 0;
  2118. error_dump:
  2119. dbg_dump_node(c, sa->node);
  2120. dbg_dump_node(c, sb->node);
  2121. return -EINVAL;
  2122. }
  2123. /**
 * dbg_check_nondata_nodes_order - check that the list of non-data nodes is sorted.
  2125. * @c: UBIFS file-system description object
  2126. * @head: the list of nodes ('struct ubifs_scan_node' objects)
  2127. *
  2128. * This function returns zero if the list of non-data nodes is sorted correctly,
  2129. * and %-EINVAL if not.
  2130. */
  2131. int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
  2132. {
  2133. struct list_head *cur;
  2134. struct ubifs_scan_node *sa, *sb;
  2135. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  2136. return 0;
  2137. for (cur = head->next; cur->next != head; cur = cur->next) {
  2138. ino_t inuma, inumb;
  2139. uint32_t hasha, hashb;
  2140. cond_resched();
  2141. sa = container_of(cur, struct ubifs_scan_node, list);
  2142. sb = container_of(cur->next, struct ubifs_scan_node, list);
  2143. if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
  2144. sa->type != UBIFS_XENT_NODE) {
  2145. ubifs_err("bad node type %d", sa->type);
  2146. dbg_dump_node(c, sa->node);
  2147. return -EINVAL;
  2148. }
		if (sb->type != UBIFS_INO_NODE && sb->type != UBIFS_DENT_NODE &&
		    sb->type != UBIFS_XENT_NODE) {
			ubifs_err("bad node type %d", sb->type);
			dbg_dump_node(c, sb->node);
			return -EINVAL;
		}
  2155. if (sa->type != UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
  2156. ubifs_err("non-inode node goes before inode node");
  2157. goto error_dump;
  2158. }
  2159. if (sa->type == UBIFS_INO_NODE && sb->type != UBIFS_INO_NODE)
  2160. continue;
  2161. if (sa->type == UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
  2162. /* Inode nodes are sorted in descending size order */
  2163. if (sa->len < sb->len) {
  2164. ubifs_err("smaller inode node goes first");
  2165. goto error_dump;
  2166. }
  2167. continue;
  2168. }
  2169. /*
  2170. * This is either a dentry or xentry, which should be sorted in
  2171. * ascending (parent ino, hash) order.
  2172. */
  2173. inuma = key_inum(c, &sa->key);
  2174. inumb = key_inum(c, &sb->key);
  2175. if (inuma < inumb)
  2176. continue;
  2177. if (inuma > inumb) {
  2178. ubifs_err("larger inum %lu goes before inum %lu",
  2179. (unsigned long)inuma, (unsigned long)inumb);
  2180. goto error_dump;
  2181. }
  2182. hasha = key_block(c, &sa->key);
  2183. hashb = key_block(c, &sb->key);
  2184. if (hasha > hashb) {
  2185. ubifs_err("larger hash %u goes before %u", hasha, hashb);
  2186. goto error_dump;
  2187. }
  2188. }
  2189. return 0;
  2190. error_dump:
  2191. ubifs_msg("dumping first node");
  2192. dbg_dump_node(c, sa->node);
  2193. ubifs_msg("dumping second node");
  2194. dbg_dump_node(c, sb->node);
	return -EINVAL;
  2197. }
  2198. static int invocation_cnt;
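/*
 * dbg_force_in_the_gaps - decide whether to force the "in-the-gaps" commit
 * method. Returns %1 roughly once in every 8 calls when the in-the-gaps
 * debugging option is enabled, and %0 otherwise.
 */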
  2199. int dbg_force_in_the_gaps(void)
  2200. {
  2201. if (!dbg_force_in_the_gaps_enabled)
  2202. return 0;
  2203. /* Force in-the-gaps every 8th commit */
  2204. return !((invocation_cnt++) & 0x7);
  2205. }
  2206. /* Failure mode for recovery testing */
  2207. #define chance(n, d) (simple_rand() <= (n) * 32768LL / (d))
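/*
 * 'chance(n, d)' evaluates to true with probability of roughly n/d:
 * 'simple_rand()' returns a value in the range [0, 32767], which is compared
 * against n * 32768 / d. For example, chance(1, 2) fires about half of the
 * time and chance(19, 20) about 95% of the time.
 */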
  2208. struct failure_mode_info {
  2209. struct list_head list;
  2210. struct ubifs_info *c;
  2211. };
  2212. static LIST_HEAD(fmi_list);
  2213. static DEFINE_SPINLOCK(fmi_lock);
  2214. static unsigned int next;
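/*
 * simple_rand - a minimal linear congruential pseudo-random generator, seeded
 * from the current PID on first use. Returns a value in the range [0, 32767].
 */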
  2215. static int simple_rand(void)
  2216. {
  2217. if (next == 0)
  2218. next = current->pid;
  2219. next = next * 1103515245 + 12345;
  2220. return (next >> 16) & 32767;
  2221. }
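/*
 * failure_mode_init - register @c in the list of file-systems for which
 * power-cut failures may be emulated.
 */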
  2222. static void failure_mode_init(struct ubifs_info *c)
  2223. {
  2224. struct failure_mode_info *fmi;
  2225. fmi = kmalloc(sizeof(struct failure_mode_info), GFP_NOFS);
  2226. if (!fmi) {
  2227. ubifs_err("Failed to register failure mode - no memory");
  2228. return;
  2229. }
  2230. fmi->c = c;
  2231. spin_lock(&fmi_lock);
  2232. list_add_tail(&fmi->list, &fmi_list);
  2233. spin_unlock(&fmi_lock);
  2234. }
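/*
 * failure_mode_exit - remove @c from the list of failure-mode file-systems
 * and free the corresponding entry.
 */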
  2235. static void failure_mode_exit(struct ubifs_info *c)
  2236. {
  2237. struct failure_mode_info *fmi, *tmp;
  2238. spin_lock(&fmi_lock);
  2239. list_for_each_entry_safe(fmi, tmp, &fmi_list, list)
  2240. if (fmi->c == c) {
  2241. list_del(&fmi->list);
  2242. kfree(fmi);
  2243. }
  2244. spin_unlock(&fmi_lock);
  2245. }
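/*
 * dbg_find_info - find the UBIFS file-system description object which
 * corresponds to UBI volume descriptor @desc, or return %NULL if the volume
 * is not registered for failure-mode testing.
 */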
  2246. static struct ubifs_info *dbg_find_info(struct ubi_volume_desc *desc)
  2247. {
  2248. struct failure_mode_info *fmi;
  2249. spin_lock(&fmi_lock);
  2250. list_for_each_entry(fmi, &fmi_list, list)
  2251. if (fmi->c->ubi == desc) {
  2252. struct ubifs_info *c = fmi->c;
  2253. spin_unlock(&fmi_lock);
  2254. return c;
  2255. }
  2256. spin_unlock(&fmi_lock);
  2257. return NULL;
  2258. }
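/*
 * in_failure_mode - return %1 if emulated power-cut failure mode has been
 * switched on for the file-system behind @desc, in which case all further
 * I/O to it is failed with %-EIO.
 */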
  2259. static int in_failure_mode(struct ubi_volume_desc *desc)
  2260. {
  2261. struct ubifs_info *c = dbg_find_info(desc);
  2262. if (c && dbg_failure_mode)
  2263. return c->dbg->failure_mode;
  2264. return 0;
  2265. }
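/*
 * do_fail - decide whether to emulate a power cut for an I/O operation on
 * LEB @lnum (@write is %1 for writes).
 *
 * On the first call a delay to failure may be chosen, either time-based or
 * based on the number of calls. Once any such delay has expired, the
 * probability of triggering a failure depends on which area of the media
 * @lnum belongs to (superblock, master area, log, LPT, orphans, index head,
 * GC head, buds). When a failure is triggered, failure mode is switched on
 * and all subsequent I/O fails with %-EIO.
 */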
  2266. static int do_fail(struct ubi_volume_desc *desc, int lnum, int write)
  2267. {
  2268. struct ubifs_info *c = dbg_find_info(desc);
  2269. struct ubifs_debug_info *d;
  2270. if (!c || !dbg_failure_mode)
  2271. return 0;
  2272. d = c->dbg;
  2273. if (d->failure_mode)
  2274. return 1;
  2275. if (!d->fail_cnt) {
  2276. /* First call - decide delay to failure */
  2277. if (chance(1, 2)) {
  2278. unsigned int delay = 1 << (simple_rand() >> 11);
  2279. if (chance(1, 2)) {
  2280. d->fail_delay = 1;
  2281. d->fail_timeout = jiffies +
  2282. msecs_to_jiffies(delay);
  2283. dbg_rcvry("failing after %ums", delay);
  2284. } else {
  2285. d->fail_delay = 2;
  2286. d->fail_cnt_max = delay;
  2287. dbg_rcvry("failing after %u calls", delay);
  2288. }
  2289. }
  2290. d->fail_cnt += 1;
  2291. }
  2292. /* Determine if failure delay has expired */
  2293. if (d->fail_delay == 1) {
  2294. if (time_before(jiffies, d->fail_timeout))
  2295. return 0;
  2296. } else if (d->fail_delay == 2)
  2297. if (d->fail_cnt++ < d->fail_cnt_max)
  2298. return 0;
  2299. if (lnum == UBIFS_SB_LNUM) {
  2300. if (write) {
  2301. if (chance(1, 2))
  2302. return 0;
  2303. } else if (chance(19, 20))
  2304. return 0;
  2305. dbg_rcvry("failing in super block LEB %d", lnum);
  2306. } else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) {
  2307. if (chance(19, 20))
  2308. return 0;
  2309. dbg_rcvry("failing in master LEB %d", lnum);
  2310. } else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) {
  2311. if (write) {
  2312. if (chance(99, 100))
  2313. return 0;
  2314. } else if (chance(399, 400))
  2315. return 0;
  2316. dbg_rcvry("failing in log LEB %d", lnum);
  2317. } else if (lnum >= c->lpt_first && lnum <= c->lpt_last) {
  2318. if (write) {
  2319. if (chance(7, 8))
  2320. return 0;
  2321. } else if (chance(19, 20))
  2322. return 0;
  2323. dbg_rcvry("failing in LPT LEB %d", lnum);
  2324. } else if (lnum >= c->orph_first && lnum <= c->orph_last) {
  2325. if (write) {
  2326. if (chance(1, 2))
  2327. return 0;
  2328. } else if (chance(9, 10))
  2329. return 0;
  2330. dbg_rcvry("failing in orphan LEB %d", lnum);
  2331. } else if (lnum == c->ihead_lnum) {
  2332. if (chance(99, 100))
  2333. return 0;
  2334. dbg_rcvry("failing in index head LEB %d", lnum);
  2335. } else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) {
  2336. if (chance(9, 10))
  2337. return 0;
  2338. dbg_rcvry("failing in GC head LEB %d", lnum);
  2339. } else if (write && !RB_EMPTY_ROOT(&c->buds) &&
  2340. !ubifs_search_bud(c, lnum)) {
  2341. if (chance(19, 20))
  2342. return 0;
  2343. dbg_rcvry("failing in non-bud LEB %d", lnum);
  2344. } else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND ||
  2345. c->cmt_state == COMMIT_RUNNING_REQUIRED) {
  2346. if (chance(999, 1000))
  2347. return 0;
  2348. dbg_rcvry("failing in bud LEB %d commit running", lnum);
  2349. } else {
  2350. if (chance(9999, 10000))
  2351. return 0;
  2352. dbg_rcvry("failing in bud LEB %d commit not running", lnum);
  2353. }
  2354. ubifs_err("*** SETTING FAILURE MODE ON (LEB %d) ***", lnum);
  2355. d->failure_mode = 1;
  2356. dump_stack();
  2357. return 1;
  2358. }
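/*
 * cut_data - emulate an interrupted write by overwriting a randomly chosen
 * tail of the @len bytes at @buf with 0xFF bytes.
 */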
  2359. static void cut_data(const void *buf, int len)
  2360. {
  2361. int flen, i;
  2362. unsigned char *p = (void *)buf;
  2363. flen = (len * (long long)simple_rand()) >> 15;
  2364. for (i = flen; i < len; i++)
  2365. p[i] = 0xff;
  2366. }
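/*
 * The dbg_leb_*() helpers below wrap the corresponding ubi_*() calls and
 * inject emulated power-cut failures: once failure mode is on, all I/O fails
 * with %-EIO, and write-type operations may trigger a new emulated failure
 * via 'do_fail()'. A failing write first has a random tail of the buffer
 * replaced with 0xFF bytes by 'cut_data()', imitating a write that was cut
 * short.
 */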
  2367. int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
  2368. int len, int check)
  2369. {
  2370. if (in_failure_mode(desc))
  2371. return -EIO;
  2372. return ubi_leb_read(desc, lnum, buf, offset, len, check);
  2373. }
  2374. int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
  2375. int offset, int len, int dtype)
  2376. {
  2377. int err, failing;
  2378. if (in_failure_mode(desc))
  2379. return -EIO;
  2380. failing = do_fail(desc, lnum, 1);
  2381. if (failing)
  2382. cut_data(buf, len);
  2383. err = ubi_leb_write(desc, lnum, buf, offset, len, dtype);
  2384. if (err)
  2385. return err;
  2386. if (failing)
  2387. return -EIO;
  2388. return 0;
  2389. }
  2390. int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
  2391. int len, int dtype)
  2392. {
  2393. int err;
  2394. if (do_fail(desc, lnum, 1))
  2395. return -EIO;
  2396. err = ubi_leb_change(desc, lnum, buf, len, dtype);
  2397. if (err)
  2398. return err;
  2399. if (do_fail(desc, lnum, 1))
  2400. return -EIO;
  2401. return 0;
  2402. }
  2403. int dbg_leb_erase(struct ubi_volume_desc *desc, int lnum)
  2404. {
  2405. int err;
  2406. if (do_fail(desc, lnum, 0))
  2407. return -EIO;
  2408. err = ubi_leb_erase(desc, lnum);
  2409. if (err)
  2410. return err;
  2411. if (do_fail(desc, lnum, 0))
  2412. return -EIO;
  2413. return 0;
  2414. }
  2415. int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum)
  2416. {
  2417. int err;
  2418. if (do_fail(desc, lnum, 0))
  2419. return -EIO;
  2420. err = ubi_leb_unmap(desc, lnum);
  2421. if (err)
  2422. return err;
  2423. if (do_fail(desc, lnum, 0))
  2424. return -EIO;
  2425. return 0;
  2426. }
  2427. int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum)
  2428. {
  2429. if (in_failure_mode(desc))
  2430. return -EIO;
  2431. return ubi_is_mapped(desc, lnum);
  2432. }
  2433. int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
  2434. {
  2435. int err;
  2436. if (do_fail(desc, lnum, 0))
  2437. return -EIO;
  2438. err = ubi_leb_map(desc, lnum, dtype);
  2439. if (err)
  2440. return err;
  2441. if (do_fail(desc, lnum, 0))
  2442. return -EIO;
  2443. return 0;
  2444. }
  2445. /**
  2446. * ubifs_debugging_init - initialize UBIFS debugging.
  2447. * @c: UBIFS file-system description object
  2448. *
  2449. * This function initializes debugging-related data for the file system.
  2450. * Returns zero in case of success and a negative error code in case of
  2451. * failure.
  2452. */
  2453. int ubifs_debugging_init(struct ubifs_info *c)
  2454. {
  2455. c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL);
  2456. if (!c->dbg)
  2457. return -ENOMEM;
  2458. failure_mode_init(c);
  2459. return 0;
  2460. }
  2461. /**
  2462. * ubifs_debugging_exit - free debugging data.
  2463. * @c: UBIFS file-system description object
  2464. */
  2465. void ubifs_debugging_exit(struct ubifs_info *c)
  2466. {
  2467. failure_mode_exit(c);
  2468. kfree(c->dbg);
  2469. }
  2470. /*
  2471. * Root directory for UBIFS stuff in debugfs. Contains sub-directories which
  2472. * contain the stuff specific to particular file-system mounts.
  2473. */
  2474. static struct dentry *dfs_rootdir;
  2475. /**
  2476. * dbg_debugfs_init - initialize debugfs file-system.
  2477. *
  2478. * UBIFS uses debugfs file-system to expose various debugging knobs to
  2479. * user-space. This function creates "ubifs" directory in the debugfs
  2480. * file-system. Returns zero in case of success and a negative error code in
  2481. * case of failure.
  2482. */
  2483. int dbg_debugfs_init(void)
  2484. {
  2485. dfs_rootdir = debugfs_create_dir("ubifs", NULL);
  2486. if (IS_ERR(dfs_rootdir)) {
  2487. int err = PTR_ERR(dfs_rootdir);
  2488. ubifs_err("cannot create \"ubifs\" debugfs directory, "
  2489. "error %d\n", err);
  2490. return err;
  2491. }
  2492. return 0;
  2493. }
  2494. /**
  2495. * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system.
  2496. */
  2497. void dbg_debugfs_exit(void)
  2498. {
  2499. debugfs_remove(dfs_rootdir);
  2500. }
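/*
 * The debugfs files created by 'dbg_debugfs_init_fs()' are write-only
 * trigger files: writing anything to "dump_lprops", "dump_budg" or
 * "dump_tnc" makes UBIFS dump the corresponding internal state to the
 * system log.
 */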
  2501. static int open_debugfs_file(struct inode *inode, struct file *file)
  2502. {
  2503. file->private_data = inode->i_private;
  2504. return 0;
  2505. }
  2506. static ssize_t write_debugfs_file(struct file *file, const char __user *buf,
  2507. size_t count, loff_t *ppos)
  2508. {
  2509. struct ubifs_info *c = file->private_data;
  2510. struct ubifs_debug_info *d = c->dbg;
  2511. if (file->f_path.dentry == d->dfs_dump_lprops)
  2512. dbg_dump_lprops(c);
  2513. else if (file->f_path.dentry == d->dfs_dump_budg) {
  2514. spin_lock(&c->space_lock);
  2515. dbg_dump_budg(c);
  2516. spin_unlock(&c->space_lock);
  2517. } else if (file->f_path.dentry == d->dfs_dump_tnc) {
  2518. mutex_lock(&c->tnc_mutex);
  2519. dbg_dump_tnc(c);
  2520. mutex_unlock(&c->tnc_mutex);
  2521. } else
  2522. return -EINVAL;
  2523. *ppos += count;
  2524. return count;
  2525. }
  2526. static const struct file_operations dfs_fops = {
  2527. .open = open_debugfs_file,
  2528. .write = write_debugfs_file,
  2529. .owner = THIS_MODULE,
  2530. .llseek = default_llseek,
  2531. };
  2532. /**
  2533. * dbg_debugfs_init_fs - initialize debugfs for UBIFS instance.
  2534. * @c: UBIFS file-system description object
  2535. *
  2536. * This function creates all debugfs files for this instance of UBIFS. Returns
  2537. * zero in case of success and a negative error code in case of failure.
  2538. *
  2539. * Note, the only reason we have not merged this function with the
  2540. * 'ubifs_debugging_init()' function is because it is better to initialize
 * debugfs interfaces at the very end of the mount process, and remove them at
 * the very beginning of the un-mount process.
  2543. */
  2544. int dbg_debugfs_init_fs(struct ubifs_info *c)
  2545. {
  2546. int err;
  2547. const char *fname;
  2548. struct dentry *dent;
  2549. struct ubifs_debug_info *d = c->dbg;
  2550. sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
  2551. fname = d->dfs_dir_name;
  2552. dent = debugfs_create_dir(fname, dfs_rootdir);
  2553. if (IS_ERR_OR_NULL(dent))
  2554. goto out;
  2555. d->dfs_dir = dent;
  2556. fname = "dump_lprops";
  2557. dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
  2558. if (IS_ERR_OR_NULL(dent))
  2559. goto out_remove;
  2560. d->dfs_dump_lprops = dent;
  2561. fname = "dump_budg";
  2562. dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
  2563. if (IS_ERR_OR_NULL(dent))
  2564. goto out_remove;
  2565. d->dfs_dump_budg = dent;
  2566. fname = "dump_tnc";
  2567. dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
  2568. if (IS_ERR_OR_NULL(dent))
  2569. goto out_remove;
  2570. d->dfs_dump_tnc = dent;
  2571. return 0;
  2572. out_remove:
  2573. debugfs_remove_recursive(d->dfs_dir);
  2574. out:
  2575. err = dent ? PTR_ERR(dent) : -ENODEV;
	ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
		  fname, err);
  2578. return err;
  2579. }
  2580. /**
  2581. * dbg_debugfs_exit_fs - remove all debugfs files.
  2582. * @c: UBIFS file-system description object
  2583. */
  2584. void dbg_debugfs_exit_fs(struct ubifs_info *c)
  2585. {
  2586. debugfs_remove_recursive(c->dbg->dfs_dir);
  2587. }
  2588. #endif /* CONFIG_UBIFS_FS_DEBUG */