debug.c
  1. /*
  2. * This file is part of UBIFS.
  3. *
  4. * Copyright (C) 2006-2008 Nokia Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program; if not, write to the Free Software Foundation, Inc., 51
  17. * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  18. *
  19. * Authors: Artem Bityutskiy (Битюцкий Артём)
  20. * Adrian Hunter
  21. */
  22. /*
  23. * This file implements most of the debugging stuff which is compiled in only
  24. * when it is enabled. But some debugging check functions are implemented in
  25. * the corresponding subsystems, because they are closely related and utilize
  26. * various local functions of those subsystems.
  27. */
  28. #define UBIFS_DBG_PRESERVE_UBI
  29. #include "ubifs.h"
  30. #include <linux/module.h>
  31. #include <linux/moduleparam.h>
  32. #include <linux/debugfs.h>
  33. #include <linux/math64.h>
  34. #ifdef CONFIG_UBIFS_FS_DEBUG
  35. DEFINE_SPINLOCK(dbg_lock);
  36. static char dbg_key_buf0[128];
  37. static char dbg_key_buf1[128];
  38. unsigned int ubifs_msg_flags;
  39. unsigned int ubifs_chk_flags;
  40. unsigned int ubifs_tst_flags;
  41. module_param_named(debug_msgs, ubifs_msg_flags, uint, S_IRUGO | S_IWUSR);
  42. module_param_named(debug_chks, ubifs_chk_flags, uint, S_IRUGO | S_IWUSR);
  43. module_param_named(debug_tsts, ubifs_tst_flags, uint, S_IRUGO | S_IWUSR);
  44. MODULE_PARM_DESC(debug_msgs, "Debug message type flags");
  45. MODULE_PARM_DESC(debug_chks, "Debug check flags");
  46. MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
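/*
 * Illustrative sketch: the three module parameters above are plain bit masks,
 * and the debugging code gates optional work on them, as the
 * "ubifs_chk_flags & UBIFS_CHK_GEN" tests further down in this file do. A
 * hypothetical check written in the same style could look like the helper
 * below; dbg_example_check() is made up purely for illustration and is not
 * part of UBIFS.
 */
#if 0
static int dbg_example_check(const struct ubifs_info *c)
{
	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
		return 0; /* generic checks are disabled, do nothing */

	/* ... perform a possibly expensive consistency check on @c here ... */
	return 0;
}
#endif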
  47. static const char *get_key_fmt(int fmt)
  48. {
  49. switch (fmt) {
  50. case UBIFS_SIMPLE_KEY_FMT:
  51. return "simple";
  52. default:
  53. return "unknown/invalid format";
  54. }
  55. }
  56. static const char *get_key_hash(int hash)
  57. {
  58. switch (hash) {
  59. case UBIFS_KEY_HASH_R5:
  60. return "R5";
  61. case UBIFS_KEY_HASH_TEST:
  62. return "test";
  63. default:
  64. return "unknown/invalid name hash";
  65. }
  66. }
  67. static const char *get_key_type(int type)
  68. {
  69. switch (type) {
  70. case UBIFS_INO_KEY:
  71. return "inode";
  72. case UBIFS_DENT_KEY:
  73. return "direntry";
  74. case UBIFS_XENT_KEY:
  75. return "xentry";
  76. case UBIFS_DATA_KEY:
  77. return "data";
  78. case UBIFS_TRUN_KEY:
  79. return "truncate";
  80. default:
  81. return "unknown/invalid key";
  82. }
  83. }
  84. static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
  85. char *buffer)
  86. {
  87. char *p = buffer;
  88. int type = key_type(c, key);
  89. if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
  90. switch (type) {
  91. case UBIFS_INO_KEY:
  92. sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
  93. get_key_type(type));
  94. break;
  95. case UBIFS_DENT_KEY:
  96. case UBIFS_XENT_KEY:
  97. sprintf(p, "(%lu, %s, %#08x)",
  98. (unsigned long)key_inum(c, key),
  99. get_key_type(type), key_hash(c, key));
  100. break;
  101. case UBIFS_DATA_KEY:
  102. sprintf(p, "(%lu, %s, %u)",
  103. (unsigned long)key_inum(c, key),
  104. get_key_type(type), key_block(c, key));
  105. break;
  106. case UBIFS_TRUN_KEY:
  107. sprintf(p, "(%lu, %s)",
  108. (unsigned long)key_inum(c, key),
  109. get_key_type(type));
  110. break;
  111. default:
  112. sprintf(p, "(bad key type: %#08x, %#08x)",
  113. key->u32[0], key->u32[1]);
  114. }
  115. } else
  116. sprintf(p, "bad key format %d", c->key_fmt);
  117. }
  118. const char *dbg_key_str0(const struct ubifs_info *c, const union ubifs_key *key)
  119. {
  120. /* dbg_lock must be held */
  121. sprintf_key(c, key, dbg_key_buf0);
  122. return dbg_key_buf0;
  123. }
  124. const char *dbg_key_str1(const struct ubifs_info *c, const union ubifs_key *key)
  125. {
  126. /* dbg_lock must be held */
  127. sprintf_key(c, key, dbg_key_buf1);
  128. return dbg_key_buf1;
  129. }
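/*
 * Illustrative sketch: dbg_key_str0() and dbg_key_str1() format a key into
 * one of the two static buffers above, which is why the comments require
 * dbg_lock to be held and why at most two keys can be printed in a single
 * statement (the DBGKEY() macro used elsewhere in this file presumably wraps
 * dbg_key_str0()). A caller printing two keys would look roughly like the
 * made-up helper below.
 */
#if 0
static void dbg_example_print_two_keys(const struct ubifs_info *c,
				       const union ubifs_key *k1,
				       const union ubifs_key *k2)
{
	spin_lock(&dbg_lock);
	printk(KERN_DEBUG "keys %s and %s\n",
	       dbg_key_str0(c, k1), dbg_key_str1(c, k2));
	spin_unlock(&dbg_lock);
}
#endif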
  130. const char *dbg_ntype(int type)
  131. {
  132. switch (type) {
  133. case UBIFS_PAD_NODE:
  134. return "padding node";
  135. case UBIFS_SB_NODE:
  136. return "superblock node";
  137. case UBIFS_MST_NODE:
  138. return "master node";
  139. case UBIFS_REF_NODE:
  140. return "reference node";
  141. case UBIFS_INO_NODE:
  142. return "inode node";
  143. case UBIFS_DENT_NODE:
  144. return "direntry node";
  145. case UBIFS_XENT_NODE:
  146. return "xentry node";
  147. case UBIFS_DATA_NODE:
  148. return "data node";
  149. case UBIFS_TRUN_NODE:
  150. return "truncate node";
  151. case UBIFS_IDX_NODE:
  152. return "indexing node";
  153. case UBIFS_CS_NODE:
  154. return "commit start node";
  155. case UBIFS_ORPH_NODE:
  156. return "orphan node";
  157. default:
  158. return "unknown node";
  159. }
  160. }
  161. static const char *dbg_gtype(int type)
  162. {
  163. switch (type) {
  164. case UBIFS_NO_NODE_GROUP:
  165. return "no node group";
  166. case UBIFS_IN_NODE_GROUP:
  167. return "in node group";
  168. case UBIFS_LAST_OF_NODE_GROUP:
  169. return "last of node group";
  170. default:
  171. return "unknown";
  172. }
  173. }
  174. const char *dbg_cstate(int cmt_state)
  175. {
  176. switch (cmt_state) {
  177. case COMMIT_RESTING:
  178. return "commit resting";
  179. case COMMIT_BACKGROUND:
  180. return "background commit requested";
  181. case COMMIT_REQUIRED:
  182. return "commit required";
  183. case COMMIT_RUNNING_BACKGROUND:
  184. return "BACKGROUND commit running";
  185. case COMMIT_RUNNING_REQUIRED:
  186. return "commit running and required";
  187. case COMMIT_BROKEN:
  188. return "broken commit";
  189. default:
  190. return "unknown commit state";
  191. }
  192. }
  193. const char *dbg_jhead(int jhead)
  194. {
  195. switch (jhead) {
  196. case GCHD:
  197. return "0 (GC)";
  198. case BASEHD:
  199. return "1 (base)";
  200. case DATAHD:
  201. return "2 (data)";
  202. default:
  203. return "unknown journal head";
  204. }
  205. }
  206. static void dump_ch(const struct ubifs_ch *ch)
  207. {
  208. printk(KERN_DEBUG "\tmagic %#x\n", le32_to_cpu(ch->magic));
  209. printk(KERN_DEBUG "\tcrc %#x\n", le32_to_cpu(ch->crc));
  210. printk(KERN_DEBUG "\tnode_type %d (%s)\n", ch->node_type,
  211. dbg_ntype(ch->node_type));
  212. printk(KERN_DEBUG "\tgroup_type %d (%s)\n", ch->group_type,
  213. dbg_gtype(ch->group_type));
  214. printk(KERN_DEBUG "\tsqnum %llu\n",
  215. (unsigned long long)le64_to_cpu(ch->sqnum));
  216. printk(KERN_DEBUG "\tlen %u\n", le32_to_cpu(ch->len));
  217. }
  218. void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode)
  219. {
  220. const struct ubifs_inode *ui = ubifs_inode(inode);
  221. printk(KERN_DEBUG "Dump in-memory inode:");
  222. printk(KERN_DEBUG "\tinode %lu\n", inode->i_ino);
  223. printk(KERN_DEBUG "\tsize %llu\n",
  224. (unsigned long long)i_size_read(inode));
  225. printk(KERN_DEBUG "\tnlink %u\n", inode->i_nlink);
  226. printk(KERN_DEBUG "\tuid %u\n", (unsigned int)inode->i_uid);
  227. printk(KERN_DEBUG "\tgid %u\n", (unsigned int)inode->i_gid);
  228. printk(KERN_DEBUG "\tatime %u.%u\n",
  229. (unsigned int)inode->i_atime.tv_sec,
  230. (unsigned int)inode->i_atime.tv_nsec);
  231. printk(KERN_DEBUG "\tmtime %u.%u\n",
  232. (unsigned int)inode->i_mtime.tv_sec,
  233. (unsigned int)inode->i_mtime.tv_nsec);
  234. printk(KERN_DEBUG "\tctime %u.%u\n",
  235. (unsigned int)inode->i_ctime.tv_sec,
  236. (unsigned int)inode->i_ctime.tv_nsec);
  237. printk(KERN_DEBUG "\tcreat_sqnum %llu\n", ui->creat_sqnum);
  238. printk(KERN_DEBUG "\txattr_size %u\n", ui->xattr_size);
  239. printk(KERN_DEBUG "\txattr_cnt %u\n", ui->xattr_cnt);
  240. printk(KERN_DEBUG "\txattr_names %u\n", ui->xattr_names);
  241. printk(KERN_DEBUG "\tdirty %u\n", ui->dirty);
  242. printk(KERN_DEBUG "\txattr %u\n", ui->xattr);
  243. printk(KERN_DEBUG "\tbulk_read %u\n", ui->bulk_read);
  244. printk(KERN_DEBUG "\tsynced_i_size %llu\n",
  245. (unsigned long long)ui->synced_i_size);
  246. printk(KERN_DEBUG "\tui_size %llu\n",
  247. (unsigned long long)ui->ui_size);
  248. printk(KERN_DEBUG "\tflags %d\n", ui->flags);
  249. printk(KERN_DEBUG "\tcompr_type %d\n", ui->compr_type);
  250. printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read);
  251. printk(KERN_DEBUG "\tread_in_a_row %lu\n", ui->read_in_a_row);
  252. printk(KERN_DEBUG "\tdata_len %d\n", ui->data_len);
  253. }
  254. void dbg_dump_node(const struct ubifs_info *c, const void *node)
  255. {
  256. int i, n;
  257. union ubifs_key key;
  258. const struct ubifs_ch *ch = node;
  259. if (dbg_failure_mode)
  260. return;
  261. /* If the magic is incorrect, just hexdump the first bytes */
  262. if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) {
  263. printk(KERN_DEBUG "Not a node, first %zu bytes:", UBIFS_CH_SZ);
  264. print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
  265. (void *)node, UBIFS_CH_SZ, 1);
  266. return;
  267. }
  268. spin_lock(&dbg_lock);
  269. dump_ch(node);
  270. switch (ch->node_type) {
  271. case UBIFS_PAD_NODE:
  272. {
  273. const struct ubifs_pad_node *pad = node;
  274. printk(KERN_DEBUG "\tpad_len %u\n",
  275. le32_to_cpu(pad->pad_len));
  276. break;
  277. }
  278. case UBIFS_SB_NODE:
  279. {
  280. const struct ubifs_sb_node *sup = node;
  281. unsigned int sup_flags = le32_to_cpu(sup->flags);
  282. printk(KERN_DEBUG "\tkey_hash %d (%s)\n",
  283. (int)sup->key_hash, get_key_hash(sup->key_hash));
  284. printk(KERN_DEBUG "\tkey_fmt %d (%s)\n",
  285. (int)sup->key_fmt, get_key_fmt(sup->key_fmt));
  286. printk(KERN_DEBUG "\tflags %#x\n", sup_flags);
  287. printk(KERN_DEBUG "\t big_lpt %u\n",
  288. !!(sup_flags & UBIFS_FLG_BIGLPT));
  289. printk(KERN_DEBUG "\tmin_io_size %u\n",
  290. le32_to_cpu(sup->min_io_size));
  291. printk(KERN_DEBUG "\tleb_size %u\n",
  292. le32_to_cpu(sup->leb_size));
  293. printk(KERN_DEBUG "\tleb_cnt %u\n",
  294. le32_to_cpu(sup->leb_cnt));
  295. printk(KERN_DEBUG "\tmax_leb_cnt %u\n",
  296. le32_to_cpu(sup->max_leb_cnt));
  297. printk(KERN_DEBUG "\tmax_bud_bytes %llu\n",
  298. (unsigned long long)le64_to_cpu(sup->max_bud_bytes));
  299. printk(KERN_DEBUG "\tlog_lebs %u\n",
  300. le32_to_cpu(sup->log_lebs));
  301. printk(KERN_DEBUG "\tlpt_lebs %u\n",
  302. le32_to_cpu(sup->lpt_lebs));
  303. printk(KERN_DEBUG "\torph_lebs %u\n",
  304. le32_to_cpu(sup->orph_lebs));
  305. printk(KERN_DEBUG "\tjhead_cnt %u\n",
  306. le32_to_cpu(sup->jhead_cnt));
  307. printk(KERN_DEBUG "\tfanout %u\n",
  308. le32_to_cpu(sup->fanout));
  309. printk(KERN_DEBUG "\tlsave_cnt %u\n",
  310. le32_to_cpu(sup->lsave_cnt));
  311. printk(KERN_DEBUG "\tdefault_compr %u\n",
  312. (int)le16_to_cpu(sup->default_compr));
  313. printk(KERN_DEBUG "\trp_size %llu\n",
  314. (unsigned long long)le64_to_cpu(sup->rp_size));
  315. printk(KERN_DEBUG "\trp_uid %u\n",
  316. le32_to_cpu(sup->rp_uid));
  317. printk(KERN_DEBUG "\trp_gid %u\n",
  318. le32_to_cpu(sup->rp_gid));
  319. printk(KERN_DEBUG "\tfmt_version %u\n",
  320. le32_to_cpu(sup->fmt_version));
  321. printk(KERN_DEBUG "\ttime_gran %u\n",
  322. le32_to_cpu(sup->time_gran));
  323. printk(KERN_DEBUG "\tUUID %pUB\n",
  324. sup->uuid);
  325. break;
  326. }
  327. case UBIFS_MST_NODE:
  328. {
  329. const struct ubifs_mst_node *mst = node;
  330. printk(KERN_DEBUG "\thighest_inum %llu\n",
  331. (unsigned long long)le64_to_cpu(mst->highest_inum));
  332. printk(KERN_DEBUG "\tcommit number %llu\n",
  333. (unsigned long long)le64_to_cpu(mst->cmt_no));
  334. printk(KERN_DEBUG "\tflags %#x\n",
  335. le32_to_cpu(mst->flags));
  336. printk(KERN_DEBUG "\tlog_lnum %u\n",
  337. le32_to_cpu(mst->log_lnum));
  338. printk(KERN_DEBUG "\troot_lnum %u\n",
  339. le32_to_cpu(mst->root_lnum));
  340. printk(KERN_DEBUG "\troot_offs %u\n",
  341. le32_to_cpu(mst->root_offs));
  342. printk(KERN_DEBUG "\troot_len %u\n",
  343. le32_to_cpu(mst->root_len));
  344. printk(KERN_DEBUG "\tgc_lnum %u\n",
  345. le32_to_cpu(mst->gc_lnum));
  346. printk(KERN_DEBUG "\tihead_lnum %u\n",
  347. le32_to_cpu(mst->ihead_lnum));
  348. printk(KERN_DEBUG "\tihead_offs %u\n",
  349. le32_to_cpu(mst->ihead_offs));
  350. printk(KERN_DEBUG "\tindex_size %llu\n",
  351. (unsigned long long)le64_to_cpu(mst->index_size));
  352. printk(KERN_DEBUG "\tlpt_lnum %u\n",
  353. le32_to_cpu(mst->lpt_lnum));
  354. printk(KERN_DEBUG "\tlpt_offs %u\n",
  355. le32_to_cpu(mst->lpt_offs));
  356. printk(KERN_DEBUG "\tnhead_lnum %u\n",
  357. le32_to_cpu(mst->nhead_lnum));
  358. printk(KERN_DEBUG "\tnhead_offs %u\n",
  359. le32_to_cpu(mst->nhead_offs));
  360. printk(KERN_DEBUG "\tltab_lnum %u\n",
  361. le32_to_cpu(mst->ltab_lnum));
  362. printk(KERN_DEBUG "\tltab_offs %u\n",
  363. le32_to_cpu(mst->ltab_offs));
  364. printk(KERN_DEBUG "\tlsave_lnum %u\n",
  365. le32_to_cpu(mst->lsave_lnum));
  366. printk(KERN_DEBUG "\tlsave_offs %u\n",
  367. le32_to_cpu(mst->lsave_offs));
  368. printk(KERN_DEBUG "\tlscan_lnum %u\n",
  369. le32_to_cpu(mst->lscan_lnum));
  370. printk(KERN_DEBUG "\tleb_cnt %u\n",
  371. le32_to_cpu(mst->leb_cnt));
  372. printk(KERN_DEBUG "\tempty_lebs %u\n",
  373. le32_to_cpu(mst->empty_lebs));
  374. printk(KERN_DEBUG "\tidx_lebs %u\n",
  375. le32_to_cpu(mst->idx_lebs));
  376. printk(KERN_DEBUG "\ttotal_free %llu\n",
  377. (unsigned long long)le64_to_cpu(mst->total_free));
  378. printk(KERN_DEBUG "\ttotal_dirty %llu\n",
  379. (unsigned long long)le64_to_cpu(mst->total_dirty));
  380. printk(KERN_DEBUG "\ttotal_used %llu\n",
  381. (unsigned long long)le64_to_cpu(mst->total_used));
  382. printk(KERN_DEBUG "\ttotal_dead %llu\n",
  383. (unsigned long long)le64_to_cpu(mst->total_dead));
  384. printk(KERN_DEBUG "\ttotal_dark %llu\n",
  385. (unsigned long long)le64_to_cpu(mst->total_dark));
  386. break;
  387. }
  388. case UBIFS_REF_NODE:
  389. {
  390. const struct ubifs_ref_node *ref = node;
  391. printk(KERN_DEBUG "\tlnum %u\n",
  392. le32_to_cpu(ref->lnum));
  393. printk(KERN_DEBUG "\toffs %u\n",
  394. le32_to_cpu(ref->offs));
  395. printk(KERN_DEBUG "\tjhead %u\n",
  396. le32_to_cpu(ref->jhead));
  397. break;
  398. }
  399. case UBIFS_INO_NODE:
  400. {
  401. const struct ubifs_ino_node *ino = node;
  402. key_read(c, &ino->key, &key);
  403. printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
  404. printk(KERN_DEBUG "\tcreat_sqnum %llu\n",
  405. (unsigned long long)le64_to_cpu(ino->creat_sqnum));
  406. printk(KERN_DEBUG "\tsize %llu\n",
  407. (unsigned long long)le64_to_cpu(ino->size));
  408. printk(KERN_DEBUG "\tnlink %u\n",
  409. le32_to_cpu(ino->nlink));
  410. printk(KERN_DEBUG "\tatime %lld.%u\n",
  411. (long long)le64_to_cpu(ino->atime_sec),
  412. le32_to_cpu(ino->atime_nsec));
  413. printk(KERN_DEBUG "\tmtime %lld.%u\n",
  414. (long long)le64_to_cpu(ino->mtime_sec),
  415. le32_to_cpu(ino->mtime_nsec));
  416. printk(KERN_DEBUG "\tctime %lld.%u\n",
  417. (long long)le64_to_cpu(ino->ctime_sec),
  418. le32_to_cpu(ino->ctime_nsec));
  419. printk(KERN_DEBUG "\tuid %u\n",
  420. le32_to_cpu(ino->uid));
  421. printk(KERN_DEBUG "\tgid %u\n",
  422. le32_to_cpu(ino->gid));
  423. printk(KERN_DEBUG "\tmode %u\n",
  424. le32_to_cpu(ino->mode));
  425. printk(KERN_DEBUG "\tflags %#x\n",
  426. le32_to_cpu(ino->flags));
  427. printk(KERN_DEBUG "\txattr_cnt %u\n",
  428. le32_to_cpu(ino->xattr_cnt));
  429. printk(KERN_DEBUG "\txattr_size %u\n",
  430. le32_to_cpu(ino->xattr_size));
  431. printk(KERN_DEBUG "\txattr_names %u\n",
  432. le32_to_cpu(ino->xattr_names));
  433. printk(KERN_DEBUG "\tcompr_type %#x\n",
  434. (int)le16_to_cpu(ino->compr_type));
  435. printk(KERN_DEBUG "\tdata len %u\n",
  436. le32_to_cpu(ino->data_len));
  437. break;
  438. }
  439. case UBIFS_DENT_NODE:
  440. case UBIFS_XENT_NODE:
  441. {
  442. const struct ubifs_dent_node *dent = node;
  443. int nlen = le16_to_cpu(dent->nlen);
  444. key_read(c, &dent->key, &key);
  445. printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
  446. printk(KERN_DEBUG "\tinum %llu\n",
  447. (unsigned long long)le64_to_cpu(dent->inum));
  448. printk(KERN_DEBUG "\ttype %d\n", (int)dent->type);
  449. printk(KERN_DEBUG "\tnlen %d\n", nlen);
  450. printk(KERN_DEBUG "\tname ");
  451. if (nlen > UBIFS_MAX_NLEN)
  452. printk(KERN_DEBUG "(bad name length, not printing, "
  453. "bad or corrupted node)");
  454. else {
  455. for (i = 0; i < nlen && dent->name[i]; i++)
  456. printk(KERN_CONT "%c", dent->name[i]);
  457. }
  458. printk(KERN_CONT "\n");
  459. break;
  460. }
  461. case UBIFS_DATA_NODE:
  462. {
  463. const struct ubifs_data_node *dn = node;
  464. int dlen = le32_to_cpu(ch->len) - UBIFS_DATA_NODE_SZ;
  465. key_read(c, &dn->key, &key);
  466. printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
  467. printk(KERN_DEBUG "\tsize %u\n",
  468. le32_to_cpu(dn->size));
  469. printk(KERN_DEBUG "\tcompr_typ %d\n",
  470. (int)le16_to_cpu(dn->compr_type));
  471. printk(KERN_DEBUG "\tdata size %d\n",
  472. dlen);
  473. printk(KERN_DEBUG "\tdata:\n");
  474. print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET, 32, 1,
  475. (void *)&dn->data, dlen, 0);
  476. break;
  477. }
  478. case UBIFS_TRUN_NODE:
  479. {
  480. const struct ubifs_trun_node *trun = node;
  481. printk(KERN_DEBUG "\tinum %u\n",
  482. le32_to_cpu(trun->inum));
  483. printk(KERN_DEBUG "\told_size %llu\n",
  484. (unsigned long long)le64_to_cpu(trun->old_size));
  485. printk(KERN_DEBUG "\tnew_size %llu\n",
  486. (unsigned long long)le64_to_cpu(trun->new_size));
  487. break;
  488. }
  489. case UBIFS_IDX_NODE:
  490. {
  491. const struct ubifs_idx_node *idx = node;
  492. n = le16_to_cpu(idx->child_cnt);
  493. printk(KERN_DEBUG "\tchild_cnt %d\n", n);
  494. printk(KERN_DEBUG "\tlevel %d\n",
  495. (int)le16_to_cpu(idx->level));
  496. printk(KERN_DEBUG "\tBranches:\n");
  497. for (i = 0; i < n && i < c->fanout - 1; i++) {
  498. const struct ubifs_branch *br;
  499. br = ubifs_idx_branch(c, idx, i);
  500. key_read(c, &br->key, &key);
  501. printk(KERN_DEBUG "\t%d: LEB %d:%d len %d key %s\n",
  502. i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs),
  503. le32_to_cpu(br->len), DBGKEY(&key));
  504. }
  505. break;
  506. }
  507. case UBIFS_CS_NODE:
  508. break;
  509. case UBIFS_ORPH_NODE:
  510. {
  511. const struct ubifs_orph_node *orph = node;
  512. printk(KERN_DEBUG "\tcommit number %llu\n",
  513. (unsigned long long)
  514. le64_to_cpu(orph->cmt_no) & LLONG_MAX);
  515. printk(KERN_DEBUG "\tlast node flag %llu\n",
  516. (unsigned long long)(le64_to_cpu(orph->cmt_no)) >> 63);
  517. n = (le32_to_cpu(ch->len) - UBIFS_ORPH_NODE_SZ) >> 3;
  518. printk(KERN_DEBUG "\t%d orphan inode numbers:\n", n);
  519. for (i = 0; i < n; i++)
  520. printk(KERN_DEBUG "\t ino %llu\n",
  521. (unsigned long long)le64_to_cpu(orph->inos[i]));
  522. break;
  523. }
  524. default:
  525. printk(KERN_DEBUG "node type %d was not recognized\n",
  526. (int)ch->node_type);
  527. }
  528. spin_unlock(&dbg_lock);
  529. }
  530. void dbg_dump_budget_req(const struct ubifs_budget_req *req)
  531. {
  532. spin_lock(&dbg_lock);
  533. printk(KERN_DEBUG "Budgeting request: new_ino %d, dirtied_ino %d\n",
  534. req->new_ino, req->dirtied_ino);
  535. printk(KERN_DEBUG "\tnew_ino_d %d, dirtied_ino_d %d\n",
  536. req->new_ino_d, req->dirtied_ino_d);
  537. printk(KERN_DEBUG "\tnew_page %d, dirtied_page %d\n",
  538. req->new_page, req->dirtied_page);
  539. printk(KERN_DEBUG "\tnew_dent %d, mod_dent %d\n",
  540. req->new_dent, req->mod_dent);
  541. printk(KERN_DEBUG "\tidx_growth %d\n", req->idx_growth);
  542. printk(KERN_DEBUG "\tdata_growth %d dd_growth %d\n",
  543. req->data_growth, req->dd_growth);
  544. spin_unlock(&dbg_lock);
  545. }
  546. void dbg_dump_lstats(const struct ubifs_lp_stats *lst)
  547. {
  548. spin_lock(&dbg_lock);
  549. printk(KERN_DEBUG "(pid %d) Lprops statistics: empty_lebs %d, "
  550. "idx_lebs %d\n", current->pid, lst->empty_lebs, lst->idx_lebs);
  551. printk(KERN_DEBUG "\ttaken_empty_lebs %d, total_free %lld, "
  552. "total_dirty %lld\n", lst->taken_empty_lebs, lst->total_free,
  553. lst->total_dirty);
  554. printk(KERN_DEBUG "\ttotal_used %lld, total_dark %lld, "
  555. "total_dead %lld\n", lst->total_used, lst->total_dark,
  556. lst->total_dead);
  557. spin_unlock(&dbg_lock);
  558. }
  559. void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
  560. {
  561. int i;
  562. struct rb_node *rb;
  563. struct ubifs_bud *bud;
  564. struct ubifs_gced_idx_leb *idx_gc;
  565. long long available, outstanding, free;
  566. spin_lock(&c->space_lock);
  567. spin_lock(&dbg_lock);
  568. printk(KERN_DEBUG "(pid %d) Budgeting info: data budget sum %lld, "
  569. "total budget sum %lld\n", current->pid,
  570. bi->data_growth + bi->dd_growth,
  571. bi->data_growth + bi->dd_growth + bi->idx_growth);
  572. printk(KERN_DEBUG "\tbudg_data_growth %lld, budg_dd_growth %lld, "
  573. "budg_idx_growth %lld\n", bi->data_growth, bi->dd_growth,
  574. bi->idx_growth);
  575. printk(KERN_DEBUG "\tmin_idx_lebs %d, old_idx_sz %llu, "
  576. "uncommitted_idx %lld\n", bi->min_idx_lebs, bi->old_idx_sz,
  577. bi->uncommitted_idx);
  578. printk(KERN_DEBUG "\tpage_budget %d, inode_budget %d, dent_budget %d\n",
  579. bi->page_budget, bi->inode_budget, bi->dent_budget);
  580. printk(KERN_DEBUG "\tnospace %u, nospace_rp %u\n",
  581. bi->nospace, bi->nospace_rp);
  582. printk(KERN_DEBUG "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
  583. c->dark_wm, c->dead_wm, c->max_idx_node_sz);
  584. if (bi != &c->bi)
  585. /*
  586. * If we are dumping saved budgeting data, do not print
  587. * additional information which is about the current state, not
  588. * the old one which corresponded to the saved budgeting data.
  589. */
  590. goto out_unlock;
  591. printk(KERN_DEBUG "\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n",
  592. c->freeable_cnt, c->calc_idx_sz, c->idx_gc_cnt);
  593. printk(KERN_DEBUG "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, "
  594. "clean_zn_cnt %ld\n", atomic_long_read(&c->dirty_pg_cnt),
  595. atomic_long_read(&c->dirty_zn_cnt),
  596. atomic_long_read(&c->clean_zn_cnt));
  597. printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n",
  598. c->gc_lnum, c->ihead_lnum);
  599. /* If we are in R/O mode, journal heads do not exist */
  600. if (c->jheads)
  601. for (i = 0; i < c->jhead_cnt; i++)
  602. printk(KERN_DEBUG "\tjhead %s\t LEB %d\n",
  603. dbg_jhead(c->jheads[i].wbuf.jhead),
  604. c->jheads[i].wbuf.lnum);
  605. for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
  606. bud = rb_entry(rb, struct ubifs_bud, rb);
  607. printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
  608. }
  609. list_for_each_entry(bud, &c->old_buds, list)
  610. printk(KERN_DEBUG "\told bud LEB %d\n", bud->lnum);
  611. list_for_each_entry(idx_gc, &c->idx_gc, list)
  612. printk(KERN_DEBUG "\tGC'ed idx LEB %d unmap %d\n",
  613. idx_gc->lnum, idx_gc->unmap);
  614. printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state);
  615. /* Print budgeting predictions */
  616. available = ubifs_calc_available(c, c->bi.min_idx_lebs);
  617. outstanding = c->bi.data_growth + c->bi.dd_growth;
  618. free = ubifs_get_free_space_nolock(c);
  619. printk(KERN_DEBUG "Budgeting predictions:\n");
  620. printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n",
  621. available, outstanding, free);
  622. out_unlock:
  623. spin_unlock(&dbg_lock);
  624. spin_unlock(&c->space_lock);
  625. }
  626. void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
  627. {
  628. int i, spc, dark = 0, dead = 0;
  629. struct rb_node *rb;
  630. struct ubifs_bud *bud;
  631. spc = lp->free + lp->dirty;
  632. if (spc < c->dead_wm)
  633. dead = spc;
  634. else
  635. dark = ubifs_calc_dark(c, spc);
  636. if (lp->flags & LPROPS_INDEX)
  637. printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
  638. "free + dirty %-8d flags %#x (", lp->lnum, lp->free,
  639. lp->dirty, c->leb_size - spc, spc, lp->flags);
  640. else
  641. printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
  642. "free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d "
  643. "flags %#-4x (", lp->lnum, lp->free, lp->dirty,
  644. c->leb_size - spc, spc, dark, dead,
  645. (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);
  646. if (lp->flags & LPROPS_TAKEN) {
  647. if (lp->flags & LPROPS_INDEX)
  648. printk(KERN_CONT "index, taken");
  649. else
  650. printk(KERN_CONT "taken");
  651. } else {
  652. const char *s;
  653. if (lp->flags & LPROPS_INDEX) {
  654. switch (lp->flags & LPROPS_CAT_MASK) {
  655. case LPROPS_DIRTY_IDX:
  656. s = "dirty index";
  657. break;
  658. case LPROPS_FRDI_IDX:
  659. s = "freeable index";
  660. break;
  661. default:
  662. s = "index";
  663. }
  664. } else {
  665. switch (lp->flags & LPROPS_CAT_MASK) {
  666. case LPROPS_UNCAT:
  667. s = "not categorized";
  668. break;
  669. case LPROPS_DIRTY:
  670. s = "dirty";
  671. break;
  672. case LPROPS_FREE:
  673. s = "free";
  674. break;
  675. case LPROPS_EMPTY:
  676. s = "empty";
  677. break;
  678. case LPROPS_FREEABLE:
  679. s = "freeable";
  680. break;
  681. default:
  682. s = NULL;
  683. break;
  684. }
  685. }
  686. printk(KERN_CONT "%s", s);
  687. }
  688. for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
  689. bud = rb_entry(rb, struct ubifs_bud, rb);
  690. if (bud->lnum == lp->lnum) {
  691. int head = 0;
  692. for (i = 0; i < c->jhead_cnt; i++) {
  693. if (lp->lnum == c->jheads[i].wbuf.lnum) {
  694. printk(KERN_CONT ", jhead %s",
  695. dbg_jhead(i));
  696. head = 1;
  697. }
  698. }
  699. if (!head)
  700. printk(KERN_CONT ", bud of jhead %s",
  701. dbg_jhead(bud->jhead));
  702. }
  703. }
  704. if (lp->lnum == c->gc_lnum)
  705. printk(KERN_CONT ", GC LEB");
  706. printk(KERN_CONT ")\n");
  707. }
  708. void dbg_dump_lprops(struct ubifs_info *c)
  709. {
  710. int lnum, err;
  711. struct ubifs_lprops lp;
  712. struct ubifs_lp_stats lst;
  713. printk(KERN_DEBUG "(pid %d) start dumping LEB properties\n",
  714. current->pid);
  715. ubifs_get_lp_stats(c, &lst);
  716. dbg_dump_lstats(&lst);
  717. for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
  718. err = ubifs_read_one_lp(c, lnum, &lp);
  719. if (err)
  720. ubifs_err("cannot read lprops for LEB %d", lnum);
  721. dbg_dump_lprop(c, &lp);
  722. }
  723. printk(KERN_DEBUG "(pid %d) finish dumping LEB properties\n",
  724. current->pid);
  725. }
  726. void dbg_dump_lpt_info(struct ubifs_info *c)
  727. {
  728. int i;
  729. spin_lock(&dbg_lock);
  730. printk(KERN_DEBUG "(pid %d) dumping LPT information\n", current->pid);
  731. printk(KERN_DEBUG "\tlpt_sz: %lld\n", c->lpt_sz);
  732. printk(KERN_DEBUG "\tpnode_sz: %d\n", c->pnode_sz);
  733. printk(KERN_DEBUG "\tnnode_sz: %d\n", c->nnode_sz);
  734. printk(KERN_DEBUG "\tltab_sz: %d\n", c->ltab_sz);
  735. printk(KERN_DEBUG "\tlsave_sz: %d\n", c->lsave_sz);
  736. printk(KERN_DEBUG "\tbig_lpt: %d\n", c->big_lpt);
  737. printk(KERN_DEBUG "\tlpt_hght: %d\n", c->lpt_hght);
  738. printk(KERN_DEBUG "\tpnode_cnt: %d\n", c->pnode_cnt);
  739. printk(KERN_DEBUG "\tnnode_cnt: %d\n", c->nnode_cnt);
  740. printk(KERN_DEBUG "\tdirty_pn_cnt: %d\n", c->dirty_pn_cnt);
  741. printk(KERN_DEBUG "\tdirty_nn_cnt: %d\n", c->dirty_nn_cnt);
  742. printk(KERN_DEBUG "\tlsave_cnt: %d\n", c->lsave_cnt);
  743. printk(KERN_DEBUG "\tspace_bits: %d\n", c->space_bits);
  744. printk(KERN_DEBUG "\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
  745. printk(KERN_DEBUG "\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
  746. printk(KERN_DEBUG "\tlpt_spc_bits: %d\n", c->lpt_spc_bits);
  747. printk(KERN_DEBUG "\tpcnt_bits: %d\n", c->pcnt_bits);
  748. printk(KERN_DEBUG "\tlnum_bits: %d\n", c->lnum_bits);
  749. printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
  750. printk(KERN_DEBUG "\tLPT head is at %d:%d\n",
  751. c->nhead_lnum, c->nhead_offs);
  752. printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n",
  753. c->ltab_lnum, c->ltab_offs);
  754. if (c->big_lpt)
  755. printk(KERN_DEBUG "\tLPT lsave is at %d:%d\n",
  756. c->lsave_lnum, c->lsave_offs);
  757. for (i = 0; i < c->lpt_lebs; i++)
  758. printk(KERN_DEBUG "\tLPT LEB %d free %d dirty %d tgc %d "
  759. "cmt %d\n", i + c->lpt_first, c->ltab[i].free,
  760. c->ltab[i].dirty, c->ltab[i].tgc, c->ltab[i].cmt);
  761. spin_unlock(&dbg_lock);
  762. }
  763. void dbg_dump_leb(const struct ubifs_info *c, int lnum)
  764. {
  765. struct ubifs_scan_leb *sleb;
  766. struct ubifs_scan_node *snod;
  767. void *buf;
  768. if (dbg_failure_mode)
  769. return;
  770. printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
  771. current->pid, lnum);
  772. buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
  773. if (!buf) {
  774. ubifs_err("cannot allocate memory for dumping LEB %d", lnum);
  775. return;
  776. }
  777. sleb = ubifs_scan(c, lnum, 0, buf, 0);
  778. if (IS_ERR(sleb)) {
  779. ubifs_err("scan error %d", (int)PTR_ERR(sleb));
  780. goto out;
  781. }
  782. printk(KERN_DEBUG "LEB %d has %d nodes ending at %d\n", lnum,
  783. sleb->nodes_cnt, sleb->endpt);
  784. list_for_each_entry(snod, &sleb->nodes, list) {
  785. cond_resched();
  786. printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", lnum,
  787. snod->offs, snod->len);
  788. dbg_dump_node(c, snod->node);
  789. }
  790. printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n",
  791. current->pid, lnum);
  792. ubifs_scan_destroy(sleb);
  793. out:
  794. vfree(buf);
  795. return;
  796. }
  797. void dbg_dump_znode(const struct ubifs_info *c,
  798. const struct ubifs_znode *znode)
  799. {
  800. int n;
  801. const struct ubifs_zbranch *zbr;
  802. spin_lock(&dbg_lock);
  803. if (znode->parent)
  804. zbr = &znode->parent->zbranch[znode->iip];
  805. else
  806. zbr = &c->zroot;
  807. printk(KERN_DEBUG "znode %p, LEB %d:%d len %d parent %p iip %d level %d"
  808. " child_cnt %d flags %lx\n", znode, zbr->lnum, zbr->offs,
  809. zbr->len, znode->parent, znode->iip, znode->level,
  810. znode->child_cnt, znode->flags);
  811. if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
  812. spin_unlock(&dbg_lock);
  813. return;
  814. }
  815. printk(KERN_DEBUG "zbranches:\n");
  816. for (n = 0; n < znode->child_cnt; n++) {
  817. zbr = &znode->zbranch[n];
  818. if (znode->level > 0)
  819. printk(KERN_DEBUG "\t%d: znode %p LEB %d:%d len %d key "
  820. "%s\n", n, zbr->znode, zbr->lnum,
  821. zbr->offs, zbr->len,
  822. DBGKEY(&zbr->key));
  823. else
  824. printk(KERN_DEBUG "\t%d: LNC %p LEB %d:%d len %d key "
  825. "%s\n", n, zbr->znode, zbr->lnum,
  826. zbr->offs, zbr->len,
  827. DBGKEY(&zbr->key));
  828. }
  829. spin_unlock(&dbg_lock);
  830. }
  831. void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat)
  832. {
  833. int i;
  834. printk(KERN_DEBUG "(pid %d) start dumping heap cat %d (%d elements)\n",
  835. current->pid, cat, heap->cnt);
  836. for (i = 0; i < heap->cnt; i++) {
  837. struct ubifs_lprops *lprops = heap->arr[i];
  838. printk(KERN_DEBUG "\t%d. LEB %d hpos %d free %d dirty %d "
  839. "flags %d\n", i, lprops->lnum, lprops->hpos,
  840. lprops->free, lprops->dirty, lprops->flags);
  841. }
  842. printk(KERN_DEBUG "(pid %d) finish dumping heap\n", current->pid);
  843. }
  844. void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
  845. struct ubifs_nnode *parent, int iip)
  846. {
  847. int i;
  848. printk(KERN_DEBUG "(pid %d) dumping pnode:\n", current->pid);
  849. printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n",
  850. (size_t)pnode, (size_t)parent, (size_t)pnode->cnext);
  851. printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n",
  852. pnode->flags, iip, pnode->level, pnode->num);
  853. for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
  854. struct ubifs_lprops *lp = &pnode->lprops[i];
  855. printk(KERN_DEBUG "\t%d: free %d dirty %d flags %d lnum %d\n",
  856. i, lp->free, lp->dirty, lp->flags, lp->lnum);
  857. }
  858. }
  859. void dbg_dump_tnc(struct ubifs_info *c)
  860. {
  861. struct ubifs_znode *znode;
  862. int level;
  863. printk(KERN_DEBUG "\n");
  864. printk(KERN_DEBUG "(pid %d) start dumping TNC tree\n", current->pid);
  865. znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
  866. level = znode->level;
  867. printk(KERN_DEBUG "== Level %d ==\n", level);
  868. while (znode) {
  869. if (level != znode->level) {
  870. level = znode->level;
  871. printk(KERN_DEBUG "== Level %d ==\n", level);
  872. }
  873. dbg_dump_znode(c, znode);
  874. znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode);
  875. }
  876. printk(KERN_DEBUG "(pid %d) finish dumping TNC tree\n", current->pid);
  877. }
  878. static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode,
  879. void *priv)
  880. {
  881. dbg_dump_znode(c, znode);
  882. return 0;
  883. }
  884. /**
  885. * dbg_dump_index - dump the on-flash index.
  886. * @c: UBIFS file-system description object
  887. *
  888. * This function dumps the whole UBIFS indexing B-tree, unlike 'dbg_dump_tnc()',
  889. * which dumps only in-memory znodes and does not read znodes from the flash media.
  890. */
  891. void dbg_dump_index(struct ubifs_info *c)
  892. {
  893. dbg_walk_index(c, NULL, dump_znode, NULL);
  894. }
  895. /**
  896. * dbg_save_space_info - save information about flash space.
  897. * @c: UBIFS file-system description object
  898. *
  899. * This function saves information about UBIFS free space, dirty space, etc, in
  900. * order to check it later.
  901. */
  902. void dbg_save_space_info(struct ubifs_info *c)
  903. {
  904. struct ubifs_debug_info *d = c->dbg;
  905. int freeable_cnt;
  906. spin_lock(&c->space_lock);
  907. memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats));
  908. memcpy(&d->saved_bi, &c->bi, sizeof(struct ubifs_budg_info));
  909. d->saved_idx_gc_cnt = c->idx_gc_cnt;
  910. /*
  911. * We use a dirty hack here and zero out @c->freeable_cnt, because it
  912. * affects the free space calculations, and UBIFS might not know about
  913. * all freeable eraseblocks. Indeed, we know about freeable eraseblocks
  914. * only when we read their lprops, and we do this only lazily, when the
  915. * need arises. So at any given point in time @c->freeable_cnt might not be
  916. * exactly accurate.
  917. *
  918. * Just one example about the issue we hit when we did not zero
  919. * @c->freeable_cnt.
  920. * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the
  921. * amount of free space in @d->saved_free
  922. * 2. We re-mount R/W, which makes UBIFS to read the "lsave"
  923. * information from flash, where we cache LEBs from various
  924. * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()'
  925. * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()'
  926. * -> 'ubifs_get_pnode()' -> 'update_cats()'
  927. * -> 'ubifs_add_to_cat()').
  928. * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt
  929. * becomes %1.
  930. * 4. We calculate the amount of free space when the re-mount is
  931. * finished in 'dbg_check_space_info()' and it does not match
  932. * @d->saved_free.
  933. */
  934. freeable_cnt = c->freeable_cnt;
  935. c->freeable_cnt = 0;
  936. d->saved_free = ubifs_get_free_space_nolock(c);
  937. c->freeable_cnt = freeable_cnt;
  938. spin_unlock(&c->space_lock);
  939. }
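/*
 * Illustrative sketch: dbg_save_space_info() is meant to be paired with
 * dbg_check_space_info() below around an operation which must not change the
 * amount of free space, e.g. a commit or a re-mount. The made-up helper below
 * shows the intended call pattern.
 */
#if 0
static int dbg_example_space_invariant(struct ubifs_info *c)
{
	dbg_save_space_info(c);

	/* ... run an operation which must not change free space ... */

	/* Returns %-EINVAL and dumps saved/current state if space changed */
	return dbg_check_space_info(c);
}
#endif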
  940. /**
  941. * dbg_check_space_info - check flash space information.
  942. * @c: UBIFS file-system description object
  943. *
  944. * This function compares current flash space information with the information
  945. * which was saved when the 'dbg_save_space_info()' function was called.
  946. * Returns zero if the information has not changed, and %-EINVAL if it has
  947. * changed.
  948. */
  949. int dbg_check_space_info(struct ubifs_info *c)
  950. {
  951. struct ubifs_debug_info *d = c->dbg;
  952. struct ubifs_lp_stats lst;
  953. long long free;
  954. int freeable_cnt;
  955. spin_lock(&c->space_lock);
  956. freeable_cnt = c->freeable_cnt;
  957. c->freeable_cnt = 0;
  958. free = ubifs_get_free_space_nolock(c);
  959. c->freeable_cnt = freeable_cnt;
  960. spin_unlock(&c->space_lock);
  961. if (free != d->saved_free) {
  962. ubifs_err("free space changed from %lld to %lld",
  963. d->saved_free, free);
  964. goto out;
  965. }
  966. return 0;
  967. out:
  968. ubifs_msg("saved lprops statistics dump");
  969. dbg_dump_lstats(&d->saved_lst);
  970. ubifs_msg("saved budgeting info dump");
  971. dbg_dump_budg(c, &d->saved_bi);
  972. ubifs_msg("saved idx_gc_cnt %d", d->saved_idx_gc_cnt);
  973. ubifs_msg("current lprops statistics dump");
  974. ubifs_get_lp_stats(c, &lst);
  975. dbg_dump_lstats(&lst);
  976. ubifs_msg("current budgeting info dump");
  977. dbg_dump_budg(c, &c->bi);
  978. dump_stack();
  979. return -EINVAL;
  980. }
  981. /**
  982. * dbg_check_synced_i_size - check synchronized inode size.
  983. * @inode: inode to check
  984. *
  985. * If inode is clean, synchronized inode size has to be equivalent to current
  986. * inode size. This function has to be called only for locked inodes (@i_mutex
  987. * has to be locked). Returns %0 if the synchronized inode size is correct, and
  988. * %-EINVAL if not.
  989. */
  990. int dbg_check_synced_i_size(struct inode *inode)
  991. {
  992. int err = 0;
  993. struct ubifs_inode *ui = ubifs_inode(inode);
  994. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  995. return 0;
  996. if (!S_ISREG(inode->i_mode))
  997. return 0;
  998. mutex_lock(&ui->ui_mutex);
  999. spin_lock(&ui->ui_lock);
  1000. if (ui->ui_size != ui->synced_i_size && !ui->dirty) {
  1001. ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode "
  1002. "is clean", ui->ui_size, ui->synced_i_size);
  1003. ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
  1004. inode->i_mode, i_size_read(inode));
  1005. dbg_dump_stack();
  1006. err = -EINVAL;
  1007. }
  1008. spin_unlock(&ui->ui_lock);
  1009. mutex_unlock(&ui->ui_mutex);
  1010. return err;
  1011. }
  1012. /*
  1013. * dbg_check_dir_size - check directory inode size and link count.
  1014. * @c: UBIFS file-system description object
  1015. * @dir: the directory to calculate size for
  1017. *
  1018. * This function makes sure that directory size and link count are correct.
  1019. * Returns zero in case of success and a negative error code in case of
  1020. * failure.
  1021. *
  1022. * Note, it is a good idea to make sure @dir->i_mutex is locked before
  1023. * calling this function.
  1024. */
  1025. int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir)
  1026. {
  1027. unsigned int nlink = 2;
  1028. union ubifs_key key;
  1029. struct ubifs_dent_node *dent, *pdent = NULL;
  1030. struct qstr nm = { .name = NULL };
  1031. loff_t size = UBIFS_INO_NODE_SZ;
  1032. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  1033. return 0;
  1034. if (!S_ISDIR(dir->i_mode))
  1035. return 0;
  1036. lowest_dent_key(c, &key, dir->i_ino);
  1037. while (1) {
  1038. int err;
  1039. dent = ubifs_tnc_next_ent(c, &key, &nm);
  1040. if (IS_ERR(dent)) {
  1041. err = PTR_ERR(dent);
  1042. if (err == -ENOENT)
  1043. break;
  1044. return err;
  1045. }
  1046. nm.name = dent->name;
  1047. nm.len = le16_to_cpu(dent->nlen);
  1048. size += CALC_DENT_SIZE(nm.len);
  1049. if (dent->type == UBIFS_ITYPE_DIR)
  1050. nlink += 1;
  1051. kfree(pdent);
  1052. pdent = dent;
  1053. key_read(c, &dent->key, &key);
  1054. }
  1055. kfree(pdent);
  1056. if (i_size_read(dir) != size) {
  1057. ubifs_err("directory inode %lu has size %llu, "
  1058. "but calculated size is %llu", dir->i_ino,
  1059. (unsigned long long)i_size_read(dir),
  1060. (unsigned long long)size);
  1061. dump_stack();
  1062. return -EINVAL;
  1063. }
  1064. if (dir->i_nlink != nlink) {
  1065. ubifs_err("directory inode %lu has nlink %u, but calculated "
  1066. "nlink is %u", dir->i_ino, dir->i_nlink, nlink);
  1067. dump_stack();
  1068. return -EINVAL;
  1069. }
  1070. return 0;
  1071. }
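/*
 * Illustrative sketch: the size checked above is accumulated as the bare
 * inode node plus one directory-entry record per name, and the link count
 * starts at 2 (the "." entry plus the name in the parent directory) and grows
 * by one for every subdirectory. The made-up helper below restates that
 * accounting on its own.
 */
#if 0
static void dbg_example_dir_accounting(const int *name_lens,
				       const int *is_dir, int cnt)
{
	loff_t size = UBIFS_INO_NODE_SZ;
	unsigned int nlink = 2;
	int i;

	for (i = 0; i < cnt; i++) {
		size += CALC_DENT_SIZE(name_lens[i]);
		if (is_dir[i])
			nlink += 1;
	}
	printk(KERN_DEBUG "expected size %lld, expected nlink %u\n",
	       (long long)size, nlink);
}
#endif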
  1072. /**
  1073. * dbg_check_key_order - make sure that colliding keys are properly ordered.
  1074. * @c: UBIFS file-system description object
  1075. * @zbr1: first zbranch
  1076. * @zbr2: following zbranch
  1077. *
  1078. * In the UBIFS indexing B-tree, colliding keys have to be sorted in the binary
  1079. * order of the names of the direntries/xentries the keys refer to. This
  1080. * function reads direntries/xentries referred by @zbr1 and @zbr2 and makes
  1081. * sure that the name of the direntry/xentry referred to by @zbr1 is less
  1082. * than that of the direntry/xentry referred to by @zbr2. Returns zero if this is true, %1 if not,
  1083. * and a negative error code in case of failure.
  1084. */
  1085. static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
  1086. struct ubifs_zbranch *zbr2)
  1087. {
  1088. int err, nlen1, nlen2, cmp;
  1089. struct ubifs_dent_node *dent1, *dent2;
  1090. union ubifs_key key;
  1091. ubifs_assert(!keys_cmp(c, &zbr1->key, &zbr2->key));
  1092. dent1 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
  1093. if (!dent1)
  1094. return -ENOMEM;
  1095. dent2 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
  1096. if (!dent2) {
  1097. err = -ENOMEM;
  1098. goto out_free;
  1099. }
  1100. err = ubifs_tnc_read_node(c, zbr1, dent1);
  1101. if (err)
  1102. goto out_free;
  1103. err = ubifs_validate_entry(c, dent1);
  1104. if (err)
  1105. goto out_free;
  1106. err = ubifs_tnc_read_node(c, zbr2, dent2);
  1107. if (err)
  1108. goto out_free;
  1109. err = ubifs_validate_entry(c, dent2);
  1110. if (err)
  1111. goto out_free;
  1112. /* Make sure node keys are the same as in zbranch */
  1113. err = 1;
  1114. key_read(c, &dent1->key, &key);
  1115. if (keys_cmp(c, &zbr1->key, &key)) {
  1116. dbg_err("1st entry at %d:%d has key %s", zbr1->lnum,
  1117. zbr1->offs, DBGKEY(&key));
  1118. dbg_err("but it should have key %s according to tnc",
  1119. DBGKEY(&zbr1->key));
  1120. dbg_dump_node(c, dent1);
  1121. goto out_free;
  1122. }
  1123. key_read(c, &dent2->key, &key);
  1124. if (keys_cmp(c, &zbr2->key, &key)) {
  1125. dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum,
  1126. zbr1->offs, DBGKEY(&key));
  1127. dbg_err("but it should have key %s according to tnc",
  1128. DBGKEY(&zbr2->key));
  1129. dbg_dump_node(c, dent2);
  1130. goto out_free;
  1131. }
  1132. nlen1 = le16_to_cpu(dent1->nlen);
  1133. nlen2 = le16_to_cpu(dent2->nlen);
  1134. cmp = memcmp(dent1->name, dent2->name, min_t(int, nlen1, nlen2));
  1135. if (cmp < 0 || (cmp == 0 && nlen1 < nlen2)) {
  1136. err = 0;
  1137. goto out_free;
  1138. }
  1139. if (cmp == 0 && nlen1 == nlen2)
  1140. dbg_err("2 xent/dent nodes with the same name");
  1141. else
  1142. dbg_err("bad order of colliding key %s",
  1143. DBGKEY(&key));
  1144. ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs);
  1145. dbg_dump_node(c, dent1);
  1146. ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs);
  1147. dbg_dump_node(c, dent2);
  1148. out_free:
  1149. kfree(dent2);
  1150. kfree(dent1);
  1151. return err;
  1152. }
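/*
 * Illustrative sketch: the ordering rule which dbg_check_key_order() enforces
 * is plain binary comparison of the names, with the shorter name sorting
 * first on a tie and two equal names being an error. The made-up helper below
 * condenses that rule.
 */
#if 0
/* Returns <0, 0 or >0 like memcmp(); 0 means a duplicate name */
static int dbg_example_name_order(const char *n1, int l1,
				  const char *n2, int l2)
{
	int cmp = memcmp(n1, n2, min_t(int, l1, l2));

	if (cmp)
		return cmp;
	return l1 - l2; /* the shorter name sorts first */
}
#endif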
  1153. /**
  1154. * dbg_check_znode - check if znode is all right.
  1155. * @c: UBIFS file-system description object
  1156. * @zbr: zbranch which points to this znode
  1157. *
  1158. * This function makes sure that znode referred to by @zbr is all right.
  1159. * Returns zero if it is, and %-EINVAL if it is not.
  1160. */
  1161. static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr)
  1162. {
  1163. struct ubifs_znode *znode = zbr->znode;
  1164. struct ubifs_znode *zp = znode->parent;
  1165. int n, err, cmp;
  1166. if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
  1167. err = 1;
  1168. goto out;
  1169. }
  1170. if (znode->level < 0) {
  1171. err = 2;
  1172. goto out;
  1173. }
  1174. if (znode->iip < 0 || znode->iip >= c->fanout) {
  1175. err = 3;
  1176. goto out;
  1177. }
  1178. if (zbr->len == 0)
  1179. /* Only dirty zbranch may have no on-flash nodes */
  1180. if (!ubifs_zn_dirty(znode)) {
  1181. err = 4;
  1182. goto out;
  1183. }
  1184. if (ubifs_zn_dirty(znode)) {
  1185. /*
  1186. * If znode is dirty, its parent has to be dirty as well. The
  1187. * order of the operations is important, so we need
  1188. * memory barriers.
  1189. */
  1190. smp_mb();
  1191. if (zp && !ubifs_zn_dirty(zp)) {
  1192. /*
  1193. * The dirty flag is atomic and is cleared outside the
  1194. * TNC mutex, so znode's dirty flag may now have
  1195. * been cleared. The child is always cleared before the
  1196. * parent, so we just need to check again.
  1197. */
  1198. smp_mb();
  1199. if (ubifs_zn_dirty(znode)) {
  1200. err = 5;
  1201. goto out;
  1202. }
  1203. }
  1204. }
  1205. if (zp) {
  1206. const union ubifs_key *min, *max;
  1207. if (znode->level != zp->level - 1) {
  1208. err = 6;
  1209. goto out;
  1210. }
  1211. /* Make sure the 'parent' pointer in our znode is correct */
  1212. err = ubifs_search_zbranch(c, zp, &zbr->key, &n);
  1213. if (!err) {
  1214. /* This zbranch does not exist in the parent */
  1215. err = 7;
  1216. goto out;
  1217. }
  1218. if (znode->iip >= zp->child_cnt) {
  1219. err = 8;
  1220. goto out;
  1221. }
  1222. if (znode->iip != n) {
  1223. /* This may happen only in case of collisions */
  1224. if (keys_cmp(c, &zp->zbranch[n].key,
  1225. &zp->zbranch[znode->iip].key)) {
  1226. err = 9;
  1227. goto out;
  1228. }
  1229. n = znode->iip;
  1230. }
  1231. /*
  1232. * Make sure that the first key in our znode is greater than or
  1233. * equal to the key in the pointing zbranch.
  1234. */
  1235. min = &zbr->key;
  1236. cmp = keys_cmp(c, min, &znode->zbranch[0].key);
  1237. if (cmp == 1) {
  1238. err = 10;
  1239. goto out;
  1240. }
  1241. if (n + 1 < zp->child_cnt) {
  1242. max = &zp->zbranch[n + 1].key;
  1243. /*
  1244. * Make sure the last key in our znode is less or
  1245. * equivalent than the key in the zbranch which goes
  1246. * after our pointing zbranch.
  1247. */
  1248. cmp = keys_cmp(c, max,
  1249. &znode->zbranch[znode->child_cnt - 1].key);
  1250. if (cmp == -1) {
  1251. err = 11;
  1252. goto out;
  1253. }
  1254. }
  1255. } else {
  1256. /* This may only be root znode */
  1257. if (zbr != &c->zroot) {
  1258. err = 12;
  1259. goto out;
  1260. }
  1261. }
  1262. /*
  1263. * Make sure that next key is greater or equivalent then the previous
  1264. * one.
  1265. */
  1266. for (n = 1; n < znode->child_cnt; n++) {
  1267. cmp = keys_cmp(c, &znode->zbranch[n - 1].key,
  1268. &znode->zbranch[n].key);
  1269. if (cmp > 0) {
  1270. err = 13;
  1271. goto out;
  1272. }
  1273. if (cmp == 0) {
  1274. /* This can only be keys with colliding hash */
  1275. if (!is_hash_key(c, &znode->zbranch[n].key)) {
  1276. err = 14;
  1277. goto out;
  1278. }
  1279. if (znode->level != 0 || c->replaying)
  1280. continue;
  1281. /*
  1282. * Colliding keys should follow binary order of
  1283. * corresponding xentry/dentry names.
  1284. */
  1285. err = dbg_check_key_order(c, &znode->zbranch[n - 1],
  1286. &znode->zbranch[n]);
  1287. if (err < 0)
  1288. return err;
  1289. if (err) {
  1290. err = 15;
  1291. goto out;
  1292. }
  1293. }
  1294. }
  1295. for (n = 0; n < znode->child_cnt; n++) {
  1296. if (!znode->zbranch[n].znode &&
  1297. (znode->zbranch[n].lnum == 0 ||
  1298. znode->zbranch[n].len == 0)) {
  1299. err = 16;
  1300. goto out;
  1301. }
  1302. if (znode->zbranch[n].lnum != 0 &&
  1303. znode->zbranch[n].len == 0) {
  1304. err = 17;
  1305. goto out;
  1306. }
  1307. if (znode->zbranch[n].lnum == 0 &&
  1308. znode->zbranch[n].len != 0) {
  1309. err = 18;
  1310. goto out;
  1311. }
  1312. if (znode->zbranch[n].lnum == 0 &&
  1313. znode->zbranch[n].offs != 0) {
  1314. err = 19;
  1315. goto out;
  1316. }
  1317. if (znode->level != 0 && znode->zbranch[n].znode)
  1318. if (znode->zbranch[n].znode->parent != znode) {
  1319. err = 20;
  1320. goto out;
  1321. }
  1322. }
  1323. return 0;
  1324. out:
  1325. ubifs_err("failed, error %d", err);
  1326. ubifs_msg("dump of the znode");
  1327. dbg_dump_znode(c, znode);
  1328. if (zp) {
  1329. ubifs_msg("dump of the parent znode");
  1330. dbg_dump_znode(c, zp);
  1331. }
  1332. dump_stack();
  1333. return -EINVAL;
  1334. }
/**
 * dbg_check_tnc - check TNC tree.
 * @c: UBIFS file-system description object
 * @extra: do extra checks that are possible at start commit
 *
 * This function traverses the whole TNC tree and checks every znode. Returns
 * zero if everything is all right and %-EINVAL if something is wrong with TNC.
 */
int dbg_check_tnc(struct ubifs_info *c, int extra)
{
	struct ubifs_znode *znode;
	long clean_cnt = 0, dirty_cnt = 0;
	int err, last;

	if (!(ubifs_chk_flags & UBIFS_CHK_TNC))
		return 0;

	ubifs_assert(mutex_is_locked(&c->tnc_mutex));
	if (!c->zroot.znode)
		return 0;

	znode = ubifs_tnc_postorder_first(c->zroot.znode);
	while (1) {
		struct ubifs_znode *prev;
		struct ubifs_zbranch *zbr;

		if (!znode->parent)
			zbr = &c->zroot;
		else
			zbr = &znode->parent->zbranch[znode->iip];

		err = dbg_check_znode(c, zbr);
		if (err)
			return err;

		if (extra) {
			if (ubifs_zn_dirty(znode))
				dirty_cnt += 1;
			else
				clean_cnt += 1;
		}

		prev = znode;
		znode = ubifs_tnc_postorder_next(znode);
		if (!znode)
			break;

		/*
		 * If the last key of this znode is equivalent to the first key
		 * of the next znode (collision), then check order of the keys.
		 */
		last = prev->child_cnt - 1;
		if (prev->level == 0 && znode->level == 0 && !c->replaying &&
		    !keys_cmp(c, &prev->zbranch[last].key,
			      &znode->zbranch[0].key)) {
			err = dbg_check_key_order(c, &prev->zbranch[last],
						  &znode->zbranch[0]);
			if (err < 0)
				return err;
			if (err) {
				ubifs_msg("first znode");
				dbg_dump_znode(c, prev);
				ubifs_msg("second znode");
				dbg_dump_znode(c, znode);
				return -EINVAL;
			}
		}
	}

	if (extra) {
		if (clean_cnt != atomic_long_read(&c->clean_zn_cnt)) {
			ubifs_err("incorrect clean_zn_cnt %ld, calculated %ld",
				  atomic_long_read(&c->clean_zn_cnt),
				  clean_cnt);
			return -EINVAL;
		}
		if (dirty_cnt != atomic_long_read(&c->dirty_zn_cnt)) {
			ubifs_err("incorrect dirty_zn_cnt %ld, calculated %ld",
				  atomic_long_read(&c->dirty_zn_cnt),
				  dirty_cnt);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * dbg_walk_index - walk the on-flash index.
 * @c: UBIFS file-system description object
 * @leaf_cb: called for each leaf node
 * @znode_cb: called for each indexing node
 * @priv: private data which is passed to callbacks
 *
 * This function walks the UBIFS index and calls the @leaf_cb for each leaf
 * node and @znode_cb for each indexing node. Returns zero in case of success
 * and a negative error code in case of failure.
 *
 * It would be better if this function removed from the TNC every znode it
 * pulled in, so that the behavior more closely matched the non-debugging
 * behavior.
 */
int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
		   dbg_znode_callback znode_cb, void *priv)
{
	int err;
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *znode, *child;

	mutex_lock(&c->tnc_mutex);
	/* If the root indexing node is not in TNC - pull it */
	if (!c->zroot.znode) {
		c->zroot.znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(c->zroot.znode)) {
			err = PTR_ERR(c->zroot.znode);
			c->zroot.znode = NULL;
			goto out_unlock;
		}
	}

	/*
	 * We are going to traverse the indexing tree in the postorder manner.
	 * Go down and find the leftmost indexing node where we are going to
	 * start from.
	 */
	znode = c->zroot.znode;
	while (znode->level > 0) {
		zbr = &znode->zbranch[0];
		child = zbr->znode;
		if (!child) {
			child = ubifs_load_znode(c, zbr, znode, 0);
			if (IS_ERR(child)) {
				err = PTR_ERR(child);
				goto out_unlock;
			}
			zbr->znode = child;
		}

		znode = child;
	}

	/* Iterate over all indexing nodes */
	while (1) {
		int idx;

		cond_resched();

		if (znode_cb) {
			err = znode_cb(c, znode, priv);
			if (err) {
				ubifs_err("znode checking function returned "
					  "error %d", err);
				dbg_dump_znode(c, znode);
				goto out_dump;
			}
		}
		if (leaf_cb && znode->level == 0) {
			for (idx = 0; idx < znode->child_cnt; idx++) {
				zbr = &znode->zbranch[idx];
				err = leaf_cb(c, zbr, priv);
				if (err) {
					ubifs_err("leaf checking function "
						  "returned error %d, for leaf "
						  "at LEB %d:%d",
						  err, zbr->lnum, zbr->offs);
					goto out_dump;
				}
			}
		}

		if (!znode->parent)
			break;

		idx = znode->iip + 1;
		znode = znode->parent;
		if (idx < znode->child_cnt) {
			/* Switch to the next index in the parent */
			zbr = &znode->zbranch[idx];
			child = zbr->znode;
			if (!child) {
				child = ubifs_load_znode(c, zbr, znode, idx);
				if (IS_ERR(child)) {
					err = PTR_ERR(child);
					goto out_unlock;
				}
				zbr->znode = child;
			}
			znode = child;
		} else
			/*
			 * This is the last child, switch to the parent and
			 * continue.
			 */
			continue;

		/* Go to the lowest leftmost znode in the new sub-tree */
		while (znode->level > 0) {
			zbr = &znode->zbranch[0];
			child = zbr->znode;
			if (!child) {
				child = ubifs_load_znode(c, zbr, znode, 0);
				if (IS_ERR(child)) {
					err = PTR_ERR(child);
					goto out_unlock;
				}
				zbr->znode = child;
			}
			znode = child;
		}
	}

	mutex_unlock(&c->tnc_mutex);
	return 0;

out_dump:
	if (znode->parent)
		zbr = &znode->parent->zbranch[znode->iip];
	else
		zbr = &c->zroot;
	ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
	dbg_dump_znode(c, znode);
out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * add_size - add znode size to partially calculated index size.
 * @c: UBIFS file-system description object
 * @znode: znode to add size for
 * @priv: partially calculated index size
 *
 * This is a helper function for 'dbg_check_idx_size()' which is called for
 * every indexing node and adds its size to the 'long long' variable pointed to
 * by @priv.
 */
static int add_size(struct ubifs_info *c, struct ubifs_znode *znode, void *priv)
{
	long long *idx_size = priv;
	int add;

	add = ubifs_idx_node_sz(c, znode->child_cnt);
	add = ALIGN(add, 8);
	*idx_size += add;
	return 0;
}

/**
 * dbg_check_idx_size - check index size.
 * @c: UBIFS file-system description object
 * @idx_size: size to check
 *
 * This function walks the UBIFS index, calculates its size and checks that the
 * size is equivalent to @idx_size. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
{
	int err;
	long long calc = 0;

	if (!(ubifs_chk_flags & UBIFS_CHK_IDX_SZ))
		return 0;

	err = dbg_walk_index(c, NULL, add_size, &calc);
	if (err) {
		ubifs_err("error %d while walking the index", err);
		return err;
	}

	if (calc != idx_size) {
		ubifs_err("index size check failed: calculated size is %lld, "
			  "should be %lld", calc, idx_size);
		dump_stack();
		return -EINVAL;
	}

	return 0;
}

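/*
 * What follows is the machinery behind 'dbg_check_filesystem()': while the
 * index is walked, 'check_leaf()' validates every leaf node and accumulates
 * per-inode statistics in an RB-tree of 'struct fsck_inode' objects, and
 * 'check_inodes()' then compares the accumulated values with what the
 * on-flash inodes claim.
 */
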
/**
 * struct fsck_inode - information about an inode used when checking the file-system.
 * @rb: link in the RB-tree of inodes
 * @inum: inode number
 * @mode: inode type, permissions, etc
 * @nlink: inode link count
 * @xattr_cnt: count of extended attributes
 * @references: how many directory/xattr entries refer to this inode
 *              (calculated while walking the index)
 * @calc_cnt: for directories - the calculated link count (2 plus the number
 *            of child directories)
 * @size: inode size (read from on-flash inode)
 * @xattr_sz: summary size of all extended attributes (read from on-flash
 *            inode)
 * @calc_sz: for directories - calculated directory size
 * @calc_xcnt: calculated count of extended attributes
 * @calc_xsz: calculated summary size of all extended attributes
 * @xattr_nms: sum of lengths of all extended attribute names belonging to this
 *             inode (read from on-flash inode)
 * @calc_xnms: calculated sum of lengths of all extended attribute names
 */
struct fsck_inode {
	struct rb_node rb;
	ino_t inum;
	umode_t mode;
	unsigned int nlink;
	unsigned int xattr_cnt;
	int references;
	int calc_cnt;
	long long size;
	unsigned int xattr_sz;
	long long calc_sz;
	long long calc_xcnt;
	long long calc_xsz;
	unsigned int xattr_nms;
	long long calc_xnms;
};

/**
 * struct fsck_data - private FS checking information.
 * @inodes: RB-tree of all inodes (contains @struct fsck_inode objects)
 */
struct fsck_data {
	struct rb_root inodes;
};

/**
 * add_inode - add inode information to RB-tree of inodes.
 * @c: UBIFS file-system description object
 * @fsckd: FS checking information
 * @ino: raw UBIFS inode to add
 *
 * This is a helper function for 'check_leaf()' which adds information about
 * inode @ino to the RB-tree of inodes. Returns inode information pointer in
 * case of success and a negative error code in case of failure.
 */
static struct fsck_inode *add_inode(struct ubifs_info *c,
				    struct fsck_data *fsckd,
				    struct ubifs_ino_node *ino)
{
	struct rb_node **p, *parent = NULL;
	struct fsck_inode *fscki;
	ino_t inum = key_inum_flash(c, &ino->key);

	p = &fsckd->inodes.rb_node;
	while (*p) {
		parent = *p;
		fscki = rb_entry(parent, struct fsck_inode, rb);
		if (inum < fscki->inum)
			p = &(*p)->rb_left;
		else if (inum > fscki->inum)
			p = &(*p)->rb_right;
		else
			return fscki;
	}

	if (inum > c->highest_inum) {
		ubifs_err("too high inode number, max. is %lu",
			  (unsigned long)c->highest_inum);
		return ERR_PTR(-EINVAL);
	}

	fscki = kzalloc(sizeof(struct fsck_inode), GFP_NOFS);
	if (!fscki)
		return ERR_PTR(-ENOMEM);

	fscki->inum = inum;
	fscki->nlink = le32_to_cpu(ino->nlink);
	fscki->size = le64_to_cpu(ino->size);
	fscki->xattr_cnt = le32_to_cpu(ino->xattr_cnt);
	fscki->xattr_sz = le32_to_cpu(ino->xattr_size);
	fscki->xattr_nms = le32_to_cpu(ino->xattr_names);
	fscki->mode = le32_to_cpu(ino->mode);
	if (S_ISDIR(fscki->mode)) {
		fscki->calc_sz = UBIFS_INO_NODE_SZ;
		fscki->calc_cnt = 2;
	}
	rb_link_node(&fscki->rb, parent, p);
	rb_insert_color(&fscki->rb, &fsckd->inodes);
	return fscki;
}

/**
 * search_inode - search inode in the RB-tree of inodes.
 * @fsckd: FS checking information
 * @inum: inode number to search
 *
 * This is a helper function for 'check_leaf()' which searches inode @inum in
 * the RB-tree of inodes and returns an inode information pointer or %NULL if
 * the inode was not found.
 */
static struct fsck_inode *search_inode(struct fsck_data *fsckd, ino_t inum)
{
	struct rb_node *p;
	struct fsck_inode *fscki;

	p = fsckd->inodes.rb_node;
	while (p) {
		fscki = rb_entry(p, struct fsck_inode, rb);
		if (inum < fscki->inum)
			p = p->rb_left;
		else if (inum > fscki->inum)
			p = p->rb_right;
		else
			return fscki;
	}
	return NULL;
}

/**
 * read_add_inode - read inode node and add it to RB-tree of inodes.
 * @c: UBIFS file-system description object
 * @fsckd: FS checking information
 * @inum: inode number to read
 *
 * This is a helper function for 'check_leaf()' which finds inode node @inum in
 * the index, reads it, and adds it to the RB-tree of inodes. Returns inode
 * information pointer in case of success and a negative error code in case of
 * failure.
 */
static struct fsck_inode *read_add_inode(struct ubifs_info *c,
					 struct fsck_data *fsckd, ino_t inum)
{
	int n, err;
	union ubifs_key key;
	struct ubifs_znode *znode;
	struct ubifs_zbranch *zbr;
	struct ubifs_ino_node *ino;
	struct fsck_inode *fscki;

	fscki = search_inode(fsckd, inum);
	if (fscki)
		return fscki;

	ino_key_init(c, &key, inum);
	err = ubifs_lookup_level0(c, &key, &znode, &n);
	if (!err) {
		ubifs_err("inode %lu not found in index", (unsigned long)inum);
		return ERR_PTR(-ENOENT);
	} else if (err < 0) {
		ubifs_err("error %d while looking up inode %lu",
			  err, (unsigned long)inum);
		return ERR_PTR(err);
	}

	zbr = &znode->zbranch[n];
	if (zbr->len < UBIFS_INO_NODE_SZ) {
		ubifs_err("bad node %lu node length %d",
			  (unsigned long)inum, zbr->len);
		return ERR_PTR(-EINVAL);
	}

	ino = kmalloc(zbr->len, GFP_NOFS);
	if (!ino)
		return ERR_PTR(-ENOMEM);

	err = ubifs_tnc_read_node(c, zbr, ino);
	if (err) {
		ubifs_err("cannot read inode node at LEB %d:%d, error %d",
			  zbr->lnum, zbr->offs, err);
		kfree(ino);
		return ERR_PTR(err);
	}

	fscki = add_inode(c, fsckd, ino);
	kfree(ino);
	if (IS_ERR(fscki)) {
		ubifs_err("error %ld while adding inode %lu node",
			  PTR_ERR(fscki), (unsigned long)inum);
		return fscki;
	}

	return fscki;
}

/**
 * check_leaf - check leaf node.
 * @c: UBIFS file-system description object
 * @zbr: zbranch of the leaf node to check
 * @priv: FS checking information
 *
 * This is a helper function for 'dbg_check_filesystem()' which is called for
 * every single leaf node while walking the indexing tree. It checks that the
 * leaf node referred from the indexing tree exists, has correct CRC, and does
 * some other basic validation. This function is also responsible for building
 * an RB-tree of inodes - it adds all inodes into the RB-tree. It also
 * calculates reference count, size, etc for each inode in order to later
 * compare them to the information stored inside the inodes and detect possible
 * inconsistencies. Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
		      void *priv)
{
	ino_t inum;
	void *node;
	struct ubifs_ch *ch;
	int err, type = key_type(c, &zbr->key);
	struct fsck_inode *fscki;

	if (zbr->len < UBIFS_CH_SZ) {
		ubifs_err("bad leaf length %d (LEB %d:%d)",
			  zbr->len, zbr->lnum, zbr->offs);
		return -EINVAL;
	}

	node = kmalloc(zbr->len, GFP_NOFS);
	if (!node)
		return -ENOMEM;

	err = ubifs_tnc_read_node(c, zbr, node);
	if (err) {
		ubifs_err("cannot read leaf node at LEB %d:%d, error %d",
			  zbr->lnum, zbr->offs, err);
		goto out_free;
	}

	/* If this is an inode node, add it to RB-tree of inodes */
	if (type == UBIFS_INO_KEY) {
		fscki = add_inode(c, priv, node);
		if (IS_ERR(fscki)) {
			err = PTR_ERR(fscki);
			ubifs_err("error %d while adding inode node", err);
			goto out_dump;
		}
		goto out;
	}

	if (type != UBIFS_DENT_KEY && type != UBIFS_XENT_KEY &&
	    type != UBIFS_DATA_KEY) {
		ubifs_err("unexpected node type %d at LEB %d:%d",
			  type, zbr->lnum, zbr->offs);
		err = -EINVAL;
		goto out_free;
	}

	ch = node;
	if (le64_to_cpu(ch->sqnum) > c->max_sqnum) {
		ubifs_err("too high sequence number, max. is %llu",
			  c->max_sqnum);
		err = -EINVAL;
		goto out_dump;
	}

	if (type == UBIFS_DATA_KEY) {
		long long blk_offs;
		struct ubifs_data_node *dn = node;

		/*
		 * Search the inode node this data node belongs to and insert
		 * it into the RB-tree of inodes.
		 */
		inum = key_inum_flash(c, &dn->key);
		fscki = read_add_inode(c, priv, inum);
		if (IS_ERR(fscki)) {
			err = PTR_ERR(fscki);
			ubifs_err("error %d while processing data node and "
				  "trying to find inode node %lu",
				  err, (unsigned long)inum);
			goto out_dump;
		}

		/* Make sure the data node is within inode size */
		blk_offs = key_block_flash(c, &dn->key);
		blk_offs <<= UBIFS_BLOCK_SHIFT;
		blk_offs += le32_to_cpu(dn->size);
		if (blk_offs > fscki->size) {
			ubifs_err("data node at LEB %d:%d is not within inode "
				  "size %lld", zbr->lnum, zbr->offs,
				  fscki->size);
			err = -EINVAL;
			goto out_dump;
		}
	} else {
		int nlen;
		struct ubifs_dent_node *dent = node;
		struct fsck_inode *fscki1;

		err = ubifs_validate_entry(c, dent);
		if (err)
			goto out_dump;

		/*
		 * Search the inode node this entry refers to and the parent
		 * inode node and insert them into the RB-tree of inodes.
		 */
		inum = le64_to_cpu(dent->inum);
		fscki = read_add_inode(c, priv, inum);
		if (IS_ERR(fscki)) {
			err = PTR_ERR(fscki);
			ubifs_err("error %d while processing entry node and "
				  "trying to find inode node %lu",
				  err, (unsigned long)inum);
			goto out_dump;
		}

		/* Count how many direntries or xentries refer to this inode */
		fscki->references += 1;

		inum = key_inum_flash(c, &dent->key);
		fscki1 = read_add_inode(c, priv, inum);
		if (IS_ERR(fscki1)) {
			err = PTR_ERR(fscki1);
			ubifs_err("error %d while processing entry node and "
				  "trying to find parent inode node %lu",
				  err, (unsigned long)inum);
			goto out_dump;
		}

		nlen = le16_to_cpu(dent->nlen);
		if (type == UBIFS_XENT_KEY) {
			fscki1->calc_xcnt += 1;
			fscki1->calc_xsz += CALC_DENT_SIZE(nlen);
			fscki1->calc_xsz += CALC_XATTR_BYTES(fscki->size);
			fscki1->calc_xnms += nlen;
		} else {
			fscki1->calc_sz += CALC_DENT_SIZE(nlen);
			if (dent->type == UBIFS_ITYPE_DIR)
				fscki1->calc_cnt += 1;
		}
	}

out:
	kfree(node);
	return 0;

out_dump:
	ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
	dbg_dump_node(c, node);
out_free:
	kfree(node);
	return err;
}

/**
 * free_inodes - free RB-tree of inodes.
 * @fsckd: FS checking information
 */
static void free_inodes(struct fsck_data *fsckd)
{
	struct rb_node *this = fsckd->inodes.rb_node;
	struct fsck_inode *fscki;

	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			fscki = rb_entry(this, struct fsck_inode, rb);
			this = rb_parent(this);
			if (this) {
				if (this->rb_left == &fscki->rb)
					this->rb_left = NULL;
				else
					this->rb_right = NULL;
			}
			kfree(fscki);
		}
	}
}

/**
 * check_inodes - checks all inodes.
 * @c: UBIFS file-system description object
 * @fsckd: FS checking information
 *
 * This is a helper function for 'dbg_check_filesystem()' which walks the
 * RB-tree of inodes after the index scan has been finished, and checks that
 * inode nlink, size, etc are correct. Returns zero if inodes are fine,
 * %-EINVAL if not, and a negative error code in case of failure.
 */
static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
{
	int n, err;
	union ubifs_key key;
	struct ubifs_znode *znode;
	struct ubifs_zbranch *zbr;
	struct ubifs_ino_node *ino;
	struct fsck_inode *fscki;
	struct rb_node *this = rb_first(&fsckd->inodes);

	while (this) {
		fscki = rb_entry(this, struct fsck_inode, rb);
		this = rb_next(this);

		if (S_ISDIR(fscki->mode)) {
			/*
			 * Directories have to have exactly one reference (they
			 * cannot have hardlinks), although the root inode is
			 * an exception.
			 */
			if (fscki->inum != UBIFS_ROOT_INO &&
			    fscki->references != 1) {
				ubifs_err("directory inode %lu has %d "
					  "direntries which refer it, but "
					  "should be 1",
					  (unsigned long)fscki->inum,
					  fscki->references);
				goto out_dump;
			}
			if (fscki->inum == UBIFS_ROOT_INO &&
			    fscki->references != 0) {
				ubifs_err("root inode %lu has non-zero (%d) "
					  "direntries which refer it",
					  (unsigned long)fscki->inum,
					  fscki->references);
				goto out_dump;
			}
			if (fscki->calc_sz != fscki->size) {
				ubifs_err("directory inode %lu size is %lld, "
					  "but calculated size is %lld",
					  (unsigned long)fscki->inum,
					  fscki->size, fscki->calc_sz);
				goto out_dump;
			}
			if (fscki->calc_cnt != fscki->nlink) {
				ubifs_err("directory inode %lu nlink is %d, "
					  "but calculated nlink is %d",
					  (unsigned long)fscki->inum,
					  fscki->nlink, fscki->calc_cnt);
				goto out_dump;
			}
		} else {
			if (fscki->references != fscki->nlink) {
				ubifs_err("inode %lu nlink is %d, but "
					  "calculated nlink is %d",
					  (unsigned long)fscki->inum,
					  fscki->nlink, fscki->references);
				goto out_dump;
			}
		}
		if (fscki->xattr_sz != fscki->calc_xsz) {
			ubifs_err("inode %lu has xattr size %u, but "
				  "calculated size is %lld",
				  (unsigned long)fscki->inum, fscki->xattr_sz,
				  fscki->calc_xsz);
			goto out_dump;
		}
		if (fscki->xattr_cnt != fscki->calc_xcnt) {
			ubifs_err("inode %lu has %u xattrs, but "
				  "calculated count is %lld",
				  (unsigned long)fscki->inum,
				  fscki->xattr_cnt, fscki->calc_xcnt);
			goto out_dump;
		}
		if (fscki->xattr_nms != fscki->calc_xnms) {
			ubifs_err("inode %lu has xattr names' size %u, but "
				  "calculated names' size is %lld",
				  (unsigned long)fscki->inum, fscki->xattr_nms,
				  fscki->calc_xnms);
			goto out_dump;
		}
	}

	return 0;

out_dump:
	/* Read the bad inode and dump it */
	ino_key_init(c, &key, fscki->inum);
	err = ubifs_lookup_level0(c, &key, &znode, &n);
	if (!err) {
		ubifs_err("inode %lu not found in index",
			  (unsigned long)fscki->inum);
		return -ENOENT;
	} else if (err < 0) {
		ubifs_err("error %d while looking up inode %lu",
			  err, (unsigned long)fscki->inum);
		return err;
	}

	zbr = &znode->zbranch[n];
	ino = kmalloc(zbr->len, GFP_NOFS);
	if (!ino)
		return -ENOMEM;

	err = ubifs_tnc_read_node(c, zbr, ino);
	if (err) {
		ubifs_err("cannot read inode node at LEB %d:%d, error %d",
			  zbr->lnum, zbr->offs, err);
		kfree(ino);
		return err;
	}

	ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
		  (unsigned long)fscki->inum, zbr->lnum, zbr->offs);
	dbg_dump_node(c, ino);
	kfree(ino);
	return -EINVAL;
}

/**
 * dbg_check_filesystem - check the file-system.
 * @c: UBIFS file-system description object
 *
 * This function checks the file system, namely:
 * o makes sure that all leaf nodes exist and their CRCs are correct;
 * o makes sure inode nlink, size, xattr size/count are correct (for all
 *   inodes).
 *
 * The function reads the whole indexing tree and all nodes, so it is pretty
 * heavy-weight. Returns zero if the file-system is consistent, %-EINVAL if
 * not, and a negative error code in case of failure.
 */
int dbg_check_filesystem(struct ubifs_info *c)
{
	int err;
	struct fsck_data fsckd;

	if (!(ubifs_chk_flags & UBIFS_CHK_FS))
		return 0;

	fsckd.inodes = RB_ROOT;
	err = dbg_walk_index(c, check_leaf, NULL, &fsckd);
	if (err)
		goto out_free;

	err = check_inodes(c, &fsckd);
	if (err)
		goto out_free;

	free_inodes(&fsckd);
	return 0;

out_free:
	ubifs_err("file-system check failed with error %d", err);
	dump_stack();
	free_inodes(&fsckd);
	return err;
}

/**
 * dbg_check_data_nodes_order - check that list of data nodes is sorted.
 * @c: UBIFS file-system description object
 * @head: the list of nodes ('struct ubifs_scan_node' objects)
 *
 * This function returns zero if the list of data nodes is sorted correctly,
 * and %-EINVAL if not.
 */
int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
{
	struct list_head *cur;
	struct ubifs_scan_node *sa, *sb;

	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
		return 0;

	for (cur = head->next; cur->next != head; cur = cur->next) {
		ino_t inuma, inumb;
		uint32_t blka, blkb;

		cond_resched();
		sa = container_of(cur, struct ubifs_scan_node, list);
		sb = container_of(cur->next, struct ubifs_scan_node, list);

		if (sa->type != UBIFS_DATA_NODE) {
			ubifs_err("bad node type %d", sa->type);
			dbg_dump_node(c, sa->node);
			return -EINVAL;
		}
		if (sb->type != UBIFS_DATA_NODE) {
			ubifs_err("bad node type %d", sb->type);
			dbg_dump_node(c, sb->node);
			return -EINVAL;
		}

		inuma = key_inum(c, &sa->key);
		inumb = key_inum(c, &sb->key);

		if (inuma < inumb)
			continue;
		if (inuma > inumb) {
			ubifs_err("larger inum %lu goes before inum %lu",
				  (unsigned long)inuma, (unsigned long)inumb);
			goto error_dump;
		}

		blka = key_block(c, &sa->key);
		blkb = key_block(c, &sb->key);

		if (blka > blkb) {
			ubifs_err("larger block %u goes before %u", blka, blkb);
			goto error_dump;
		}
		if (blka == blkb) {
			ubifs_err("two data nodes for the same block");
			goto error_dump;
		}
	}

	return 0;

error_dump:
	dbg_dump_node(c, sa->node);
	dbg_dump_node(c, sb->node);
	return -EINVAL;
}

/**
 * dbg_check_nondata_nodes_order - check that list of non-data nodes is sorted.
 * @c: UBIFS file-system description object
 * @head: the list of nodes ('struct ubifs_scan_node' objects)
 *
 * This function returns zero if the list of non-data nodes is sorted correctly,
 * and %-EINVAL if not.
 */
int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
{
	struct list_head *cur;
	struct ubifs_scan_node *sa, *sb;

	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
		return 0;

	for (cur = head->next; cur->next != head; cur = cur->next) {
		ino_t inuma, inumb;
		uint32_t hasha, hashb;

		cond_resched();
		sa = container_of(cur, struct ubifs_scan_node, list);
		sb = container_of(cur->next, struct ubifs_scan_node, list);

		if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
		    sa->type != UBIFS_XENT_NODE) {
			ubifs_err("bad node type %d", sa->type);
			dbg_dump_node(c, sa->node);
			return -EINVAL;
		}
		if (sb->type != UBIFS_INO_NODE && sb->type != UBIFS_DENT_NODE &&
		    sb->type != UBIFS_XENT_NODE) {
			ubifs_err("bad node type %d", sb->type);
			dbg_dump_node(c, sb->node);
			return -EINVAL;
		}

		if (sa->type != UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
			ubifs_err("non-inode node goes before inode node");
			goto error_dump;
		}

		if (sa->type == UBIFS_INO_NODE && sb->type != UBIFS_INO_NODE)
			continue;

		if (sa->type == UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
			/* Inode nodes are sorted in descending size order */
			if (sa->len < sb->len) {
				ubifs_err("smaller inode node goes first");
				goto error_dump;
			}
			continue;
		}

		/*
		 * This is either a dentry or xentry, which should be sorted in
		 * ascending (parent ino, hash) order.
		 */
		inuma = key_inum(c, &sa->key);
		inumb = key_inum(c, &sb->key);

		if (inuma < inumb)
			continue;
		if (inuma > inumb) {
			ubifs_err("larger inum %lu goes before inum %lu",
				  (unsigned long)inuma, (unsigned long)inumb);
			goto error_dump;
		}

		hasha = key_block(c, &sa->key);
		hashb = key_block(c, &sb->key);

		if (hasha > hashb) {
			ubifs_err("larger hash %u goes before %u",
				  hasha, hashb);
			goto error_dump;
		}
	}

	return 0;

error_dump:
	ubifs_msg("dumping first node");
	dbg_dump_node(c, sa->node);
	ubifs_msg("dumping second node");
	dbg_dump_node(c, sb->node);
	return -EINVAL;
}

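/*
 * dbg_force_in_the_gaps - randomly trigger the 'in-the-gaps' commit method.
 *
 * Returns non-zero roughly one time in eight when general debugging checks
 * are enabled, so that the slower 'in-the-gaps' commit path gets exercised.
 */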
int dbg_force_in_the_gaps(void)
{
	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
		return 0;

	return !(random32() & 7);
}

/* Failure mode for recovery testing */

#define chance(n, d) (simple_rand() <= (n) * 32768LL / (d))

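/*
 * 'chance(n, d)' evaluates to true with a probability of roughly n/d, based
 * on the 'simple_rand()' generator below. Every mounted file-system registers
 * a 'struct failure_mode_info' object on 'fmi_list', which lets the
 * 'dbg_leb_*()' wrappers map a UBI volume descriptor back to its
 * 'struct ubifs_info'.
 */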
struct failure_mode_info {
	struct list_head list;
	struct ubifs_info *c;
};

static LIST_HEAD(fmi_list);
static DEFINE_SPINLOCK(fmi_lock);

static unsigned int next;

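/*
 * simple_rand - return a pseudo-random number in the range [0, 32767].
 *
 * A trivial linear congruential generator seeded from the current PID on
 * first use. It only needs to be cheap and "random enough" for failure-mode
 * testing.
 */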
static int simple_rand(void)
{
	if (next == 0)
		next = current->pid;
	next = next * 1103515245 + 12345;
	return (next >> 16) & 32767;
}

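/*
 * failure_mode_init/failure_mode_exit - register and unregister a file-system
 * on the list of candidates for emulated failures. Registration failure is
 * not fatal - that file-system simply gets no failures injected.
 */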
static void failure_mode_init(struct ubifs_info *c)
{
	struct failure_mode_info *fmi;

	fmi = kmalloc(sizeof(struct failure_mode_info), GFP_NOFS);
	if (!fmi) {
		ubifs_err("Failed to register failure mode - no memory");
		return;
	}
	fmi->c = c;
	spin_lock(&fmi_lock);
	list_add_tail(&fmi->list, &fmi_list);
	spin_unlock(&fmi_lock);
}

static void failure_mode_exit(struct ubifs_info *c)
{
	struct failure_mode_info *fmi, *tmp;

	spin_lock(&fmi_lock);
	list_for_each_entry_safe(fmi, tmp, &fmi_list, list)
		if (fmi->c == c) {
			list_del(&fmi->list);
			kfree(fmi);
		}
	spin_unlock(&fmi_lock);
}

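/*
 * dbg_find_info - find the ubifs_info object which owns UBI volume @desc, or
 * return %NULL if the volume is not registered for failure-mode testing.
 */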
static struct ubifs_info *dbg_find_info(struct ubi_volume_desc *desc)
{
	struct failure_mode_info *fmi;

	spin_lock(&fmi_lock);
	list_for_each_entry(fmi, &fmi_list, list)
		if (fmi->c->ubi == desc) {
			struct ubifs_info *c = fmi->c;

			spin_unlock(&fmi_lock);
			return c;
		}
	spin_unlock(&fmi_lock);
	return NULL;
}

static int in_failure_mode(struct ubi_volume_desc *desc)
{
	struct ubifs_info *c = dbg_find_info(desc);

	if (c && dbg_failure_mode)
		return c->dbg->failure_mode;
	return 0;
}

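/*
 * do_fail - decide whether an I/O operation on LEB @lnum should be failed.
 *
 * On the first call a random delay to failure is chosen - either a timeout or
 * a number of calls. Once the delay has expired, each operation fails with a
 * probability which depends on which area of the media @lnum belongs to
 * (superblock, master area, log, LPT, orphans, index head, GC head, buds).
 * When a failure is injected, failure mode is switched on and all further I/O
 * on this file-system fails with %-EIO. Returns %1 if the operation should
 * fail and %0 otherwise.
 */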
static int do_fail(struct ubi_volume_desc *desc, int lnum, int write)
{
	struct ubifs_info *c = dbg_find_info(desc);
	struct ubifs_debug_info *d;

	if (!c || !dbg_failure_mode)
		return 0;
	d = c->dbg;
	if (d->failure_mode)
		return 1;
	if (!d->fail_cnt) {
		/* First call - decide delay to failure */
		if (chance(1, 2)) {
			unsigned int delay = 1 << (simple_rand() >> 11);

			if (chance(1, 2)) {
				d->fail_delay = 1;
				d->fail_timeout = jiffies +
						  msecs_to_jiffies(delay);
				dbg_rcvry("failing after %ums", delay);
			} else {
				d->fail_delay = 2;
				d->fail_cnt_max = delay;
				dbg_rcvry("failing after %u calls", delay);
			}
		}
		d->fail_cnt += 1;
	}
	/* Determine if failure delay has expired */
	if (d->fail_delay == 1) {
		if (time_before(jiffies, d->fail_timeout))
			return 0;
	} else if (d->fail_delay == 2)
		if (d->fail_cnt++ < d->fail_cnt_max)
			return 0;
	if (lnum == UBIFS_SB_LNUM) {
		if (write) {
			if (chance(1, 2))
				return 0;
		} else if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in super block LEB %d", lnum);
	} else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) {
		if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in master LEB %d", lnum);
	} else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) {
		if (write) {
			if (chance(99, 100))
				return 0;
		} else if (chance(399, 400))
			return 0;
		dbg_rcvry("failing in log LEB %d", lnum);
	} else if (lnum >= c->lpt_first && lnum <= c->lpt_last) {
		if (write) {
			if (chance(7, 8))
				return 0;
		} else if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in LPT LEB %d", lnum);
	} else if (lnum >= c->orph_first && lnum <= c->orph_last) {
		if (write) {
			if (chance(1, 2))
				return 0;
		} else if (chance(9, 10))
			return 0;
		dbg_rcvry("failing in orphan LEB %d", lnum);
	} else if (lnum == c->ihead_lnum) {
		if (chance(99, 100))
			return 0;
		dbg_rcvry("failing in index head LEB %d", lnum);
	} else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) {
		if (chance(9, 10))
			return 0;
		dbg_rcvry("failing in GC head LEB %d", lnum);
	} else if (write && !RB_EMPTY_ROOT(&c->buds) &&
		   !ubifs_search_bud(c, lnum)) {
		if (chance(19, 20))
			return 0;
		dbg_rcvry("failing in non-bud LEB %d", lnum);
	} else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND ||
		   c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		if (chance(999, 1000))
			return 0;
		dbg_rcvry("failing in bud LEB %d commit running", lnum);
	} else {
		if (chance(9999, 10000))
			return 0;
		dbg_rcvry("failing in bud LEB %d commit not running", lnum);
	}
	ubifs_err("*** SETTING FAILURE MODE ON (LEB %d) ***", lnum);
	d->failure_mode = 1;
	dump_stack();
	return 1;
}

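/*
 * cut_data - emulate a power cut in the middle of a write. A random prefix of
 * @buf is left intact and the remainder is overwritten with 0xFF bytes, which
 * is what freshly erased (and hence never written) flash contains.
 */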
static void cut_data(const void *buf, int len)
{
	int flen, i;
	unsigned char *p = (void *)buf;

	flen = (len * (long long)simple_rand()) >> 15;
	for (i = flen; i < len; i++)
		p[i] = 0xff;
}

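/*
 * The dbg_leb_*() functions below wrap the corresponding ubi_leb_*() calls.
 * When failure-mode testing is enabled they may inject an -EIO error, and
 * dbg_leb_write() additionally truncates the buffer with cut_data() so that
 * the failed write looks like an interrupted one on the media.
 */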
int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
		 int len, int check)
{
	if (in_failure_mode(desc))
		return -EIO;
	return ubi_leb_read(desc, lnum, buf, offset, len, check);
}

int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
		  int offset, int len, int dtype)
{
	int err, failing;

	if (in_failure_mode(desc))
		return -EIO;
	failing = do_fail(desc, lnum, 1);
	if (failing)
		cut_data(buf, len);
	err = ubi_leb_write(desc, lnum, buf, offset, len, dtype);
	if (err)
		return err;
	if (failing)
		return -EIO;
	return 0;
}

int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
		   int len, int dtype)
{
	int err;

	if (do_fail(desc, lnum, 1))
		return -EIO;
	err = ubi_leb_change(desc, lnum, buf, len, dtype);
	if (err)
		return err;
	if (do_fail(desc, lnum, 1))
		return -EIO;
	return 0;
}

int dbg_leb_erase(struct ubi_volume_desc *desc, int lnum)
{
	int err;

	if (do_fail(desc, lnum, 0))
		return -EIO;
	err = ubi_leb_erase(desc, lnum);
	if (err)
		return err;
	if (do_fail(desc, lnum, 0))
		return -EIO;
	return 0;
}

int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum)
{
	int err;

	if (do_fail(desc, lnum, 0))
		return -EIO;
	err = ubi_leb_unmap(desc, lnum);
	if (err)
		return err;
	if (do_fail(desc, lnum, 0))
		return -EIO;
	return 0;
}

int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum)
{
	if (in_failure_mode(desc))
		return -EIO;
	return ubi_is_mapped(desc, lnum);
}

int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
{
	int err;

	if (do_fail(desc, lnum, 0))
		return -EIO;
	err = ubi_leb_map(desc, lnum, dtype);
	if (err)
		return err;
	if (do_fail(desc, lnum, 0))
		return -EIO;
	return 0;
}

/**
 * ubifs_debugging_init - initialize UBIFS debugging.
 * @c: UBIFS file-system description object
 *
 * This function initializes debugging-related data for the file system.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_debugging_init(struct ubifs_info *c)
{
	c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL);
	if (!c->dbg)
		return -ENOMEM;

	failure_mode_init(c);
	return 0;
}

/**
 * ubifs_debugging_exit - free debugging data.
 * @c: UBIFS file-system description object
 */
void ubifs_debugging_exit(struct ubifs_info *c)
{
	failure_mode_exit(c);
	kfree(c->dbg);
}

/*
 * Root directory for UBIFS stuff in debugfs. Contains sub-directories which
 * contain the stuff specific to particular file-system mounts.
 */
static struct dentry *dfs_rootdir;

/**
 * dbg_debugfs_init - initialize debugfs file-system.
 *
 * UBIFS uses debugfs file-system to expose various debugging knobs to
 * user-space. This function creates "ubifs" directory in the debugfs
 * file-system. Returns zero in case of success and a negative error code in
 * case of failure.
 */
int dbg_debugfs_init(void)
{
	dfs_rootdir = debugfs_create_dir("ubifs", NULL);
	if (IS_ERR(dfs_rootdir)) {
		int err = PTR_ERR(dfs_rootdir);

		ubifs_err("cannot create \"ubifs\" debugfs directory, "
			  "error %d\n", err);
		return err;
	}

	return 0;
}

/**
 * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system.
 */
void dbg_debugfs_exit(void)
{
	debugfs_remove(dfs_rootdir);
}

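/*
 * The "dump_lprops", "dump_budg" and "dump_tnc" debugfs files are write-only
 * triggers: writing anything to one of them makes UBIFS dump the
 * corresponding information to the kernel log. 'open_debugfs_file()' just
 * stashes the ubifs_info pointer and 'write_debugfs_file()' dispatches on
 * which dentry was written to.
 */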
static int open_debugfs_file(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return nonseekable_open(inode, file);
}

static ssize_t write_debugfs_file(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct ubifs_info *c = file->private_data;
	struct ubifs_debug_info *d = c->dbg;

	if (file->f_path.dentry == d->dfs_dump_lprops)
		dbg_dump_lprops(c);
	else if (file->f_path.dentry == d->dfs_dump_budg)
		dbg_dump_budg(c, &c->bi);
	else if (file->f_path.dentry == d->dfs_dump_tnc) {
		mutex_lock(&c->tnc_mutex);
		dbg_dump_tnc(c);
		mutex_unlock(&c->tnc_mutex);
	} else
		return -EINVAL;

	*ppos += count;
	return count;
}

static const struct file_operations dfs_fops = {
	.open = open_debugfs_file,
	.write = write_debugfs_file,
	.owner = THIS_MODULE,
	.llseek = no_llseek,
};

/**
 * dbg_debugfs_init_fs - initialize debugfs for UBIFS instance.
 * @c: UBIFS file-system description object
 *
 * This function creates all debugfs files for this instance of UBIFS. Returns
 * zero in case of success and a negative error code in case of failure.
 *
 * Note, the only reason we have not merged this function with the
 * 'ubifs_debugging_init()' function is because it is better to initialize
 * debugfs interfaces at the very end of the mount process, and remove them at
 * the very beginning of the un-mount process.
 */
int dbg_debugfs_init_fs(struct ubifs_info *c)
{
	int err;
	const char *fname;
	struct dentry *dent;
	struct ubifs_debug_info *d = c->dbg;

	sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
	fname = d->dfs_dir_name;
	dent = debugfs_create_dir(fname, dfs_rootdir);
	if (IS_ERR_OR_NULL(dent))
		goto out;
	d->dfs_dir = dent;

	fname = "dump_lprops";
	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
	if (IS_ERR_OR_NULL(dent))
		goto out_remove;
	d->dfs_dump_lprops = dent;

	fname = "dump_budg";
	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
	if (IS_ERR_OR_NULL(dent))
		goto out_remove;
	d->dfs_dump_budg = dent;

	fname = "dump_tnc";
	dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
	if (IS_ERR_OR_NULL(dent))
		goto out_remove;
	d->dfs_dump_tnc = dent;

	return 0;

out_remove:
	debugfs_remove_recursive(d->dfs_dir);
out:
	err = dent ? PTR_ERR(dent) : -ENODEV;
	ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
		  fname, err);
	return err;
}

/**
 * dbg_debugfs_exit_fs - remove all debugfs files.
 * @c: UBIFS file-system description object
 */
void dbg_debugfs_exit_fs(struct ubifs_info *c)
{
	debugfs_remove_recursive(c->dbg->dfs_dir);
}

#endif /* CONFIG_UBIFS_FS_DEBUG */