/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements functions needed to recover from unclean un-mounts.
 * When UBIFS is mounted, it checks a flag on the master node to determine if
 * an un-mount was completed successfully. If not, the process of mounting
 * incorporates additional checking and fixing of on-flash data structures.
 * UBIFS always cleans away all remnants of an unclean un-mount, so that
 * errors do not accumulate. However UBIFS defers recovery if it is mounted
 * read-only, and the flash is not modified in that case.
 */

#include <linux/crc32.h>
#include "ubifs.h"

/**
 * is_empty - determine whether a buffer is empty (contains all 0xff).
 * @buf: buffer to check
 * @len: length of buffer
 *
 * This function returns %1 if the buffer is empty (contains all 0xff) otherwise
 * %0 is returned.
 */
static int is_empty(void *buf, int len)
{
	uint8_t *p = buf;
	int i;

	for (i = 0; i < len; i++)
		if (*p++ != 0xff)
			return 0;
	return 1;
}

/**
 * get_master_node - get the last valid master node allowing for corruption.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @pbuf: buffer containing the LEB read, is returned here
 * @mst: master node, if found, is returned here
 * @cor: corruption, if found, is returned here
 *
 * This function allocates a buffer, reads the LEB into it, and finds and
 * returns the last valid master node allowing for one area of corruption.
 * The corrupt area, if there is one, must be consistent with the assumption
 * that it is the result of an unclean unmount while the master node was being
 * written. Under those circumstances, it is valid to use the previously written
 * master node.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int get_master_node(const struct ubifs_info *c, int lnum, void **pbuf,
			   struct ubifs_mst_node **mst, void **cor)
{
	const int sz = c->mst_node_alsz;
	int err, offs, len;
	void *sbuf, *buf;

	sbuf = vmalloc(c->leb_size);
	if (!sbuf)
		return -ENOMEM;

	err = ubi_read(c->ubi, lnum, sbuf, 0, c->leb_size);
	if (err && err != -EBADMSG)
		goto out_free;

	/* Find the first position that is definitely not a node */
	offs = 0;
	buf = sbuf;
	len = c->leb_size;
	while (offs + UBIFS_MST_NODE_SZ <= c->leb_size) {
		struct ubifs_ch *ch = buf;

		if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
			break;
		offs += sz;
		buf += sz;
		len -= sz;
	}
	/* See if there was a valid master node before that */
	if (offs) {
		int ret;

		offs -= sz;
		buf -= sz;
		len += sz;
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
		if (ret != SCANNED_A_NODE && offs) {
			/* Could have been corruption so check one place back */
			offs -= sz;
			buf -= sz;
			len += sz;
			ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
			if (ret != SCANNED_A_NODE)
				/*
				 * We accept only one area of corruption because
				 * we are assuming that it was caused while
				 * trying to write a master node.
				 */
				goto out_err;
		}
		if (ret == SCANNED_A_NODE) {
			struct ubifs_ch *ch = buf;

			if (ch->node_type != UBIFS_MST_NODE)
				goto out_err;
			dbg_rcvry("found a master node at %d:%d", lnum, offs);
			*mst = buf;
			offs += sz;
			buf += sz;
			len -= sz;
		}
	}
	/* Check for corruption */
	if (offs < c->leb_size) {
		if (!is_empty(buf, min_t(int, len, sz))) {
			*cor = buf;
			dbg_rcvry("found corruption at %d:%d", lnum, offs);
		}
		offs += sz;
		buf += sz;
		len -= sz;
	}
	/* Check remaining empty space */
	if (offs < c->leb_size)
		if (!is_empty(buf, len))
			goto out_err;
	*pbuf = sbuf;
	return 0;

out_err:
	err = -EINVAL;
out_free:
	vfree(sbuf);
	*mst = NULL;
	*cor = NULL;
	return err;
}

/**
 * write_rcvrd_mst_node - write recovered master node.
 * @c: UBIFS file-system description object
 * @mst: master node
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int write_rcvrd_mst_node(struct ubifs_info *c,
				struct ubifs_mst_node *mst)
{
	int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz;
	__le32 save_flags;

	dbg_rcvry("recovery");

	save_flags = mst->flags;
	mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);

	ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
	err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM);
	if (err)
		goto out;
	err = ubi_leb_change(c->ubi, lnum + 1, mst, sz, UBI_SHORTTERM);
	if (err)
		goto out;
out:
	mst->flags = save_flags;
	return err;
}

/**
 * ubifs_recover_master_node - recover the master node.
 * @c: UBIFS file-system description object
 *
 * This function recovers the master node from corruption that may occur due to
 * an unclean unmount.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_master_node(struct ubifs_info *c)
{
	void *buf1 = NULL, *buf2 = NULL, *cor1 = NULL, *cor2 = NULL;
	struct ubifs_mst_node *mst1 = NULL, *mst2 = NULL, *mst;
	const int sz = c->mst_node_alsz;
	int err, offs1, offs2;

	dbg_rcvry("recovery");

	err = get_master_node(c, UBIFS_MST_LNUM, &buf1, &mst1, &cor1);
	if (err)
		goto out_free;

	err = get_master_node(c, UBIFS_MST_LNUM + 1, &buf2, &mst2, &cor2);
	if (err)
		goto out_free;
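
	/*
	 * The master node is written to the two master LEBs one after the
	 * other, so the offsets and any corruption found in each LEB tell us
	 * at which point an unclean unmount interrupted that sequence.
	 */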
	if (mst1) {
		offs1 = (void *)mst1 - buf1;
		if ((le32_to_cpu(mst1->flags) & UBIFS_MST_RCVRY) &&
		    (offs1 == 0 && !cor1)) {
			/*
			 * mst1 was written by recovery at offset 0 with no
			 * corruption.
			 */
			dbg_rcvry("recovery recovery");
			mst = mst1;
		} else if (mst2) {
			offs2 = (void *)mst2 - buf2;
			if (offs1 == offs2) {
				/* Same offset, so must be the same */
				if (memcmp((void *)mst1 + UBIFS_CH_SZ,
					   (void *)mst2 + UBIFS_CH_SZ,
					   UBIFS_MST_NODE_SZ - UBIFS_CH_SZ))
					goto out_err;
				mst = mst1;
			} else if (offs2 + sz == offs1) {
				/* 1st LEB was written, 2nd was not */
				if (cor1)
					goto out_err;
				mst = mst1;
			} else if (offs1 == 0 && offs2 + sz >= c->leb_size) {
				/* 1st LEB was unmapped and written, 2nd not */
				if (cor1)
					goto out_err;
				mst = mst1;
			} else
				goto out_err;
		} else {
			/*
			 * 2nd LEB was unmapped and about to be written, so
			 * there must be only one master node in the first LEB
			 * and no corruption.
			 */
			if (offs1 != 0 || cor1)
				goto out_err;
			mst = mst1;
		}
	} else {
		if (!mst2)
			goto out_err;
		/*
		 * 1st LEB was unmapped and about to be written, so there must
		 * be no room left in 2nd LEB.
		 */
		offs2 = (void *)mst2 - buf2;
		if (offs2 + sz + sz <= c->leb_size)
			goto out_err;
		mst = mst2;
	}

	dbg_rcvry("recovered master node from LEB %d",
		  (mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1));

	memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);

	if ((c->vfs_sb->s_flags & MS_RDONLY)) {
		/* Read-only mode. Keep a copy for switching to rw mode */
		c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL);
		if (!c->rcvrd_mst_node) {
			err = -ENOMEM;
			goto out_free;
		}
		memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);
	} else {
		/* Write the recovered master node */
		c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
		err = write_rcvrd_mst_node(c, c->mst_node);
		if (err)
			goto out_free;
	}

	vfree(buf2);
	vfree(buf1);

	return 0;

out_err:
	err = -EINVAL;
out_free:
	ubifs_err("failed to recover master node");
	if (mst1) {
		dbg_err("dumping first master node");
		dbg_dump_node(c, mst1);
	}
	if (mst2) {
		dbg_err("dumping second master node");
		dbg_dump_node(c, mst2);
	}
	vfree(buf2);
	vfree(buf1);
	return err;
}

/**
 * ubifs_write_rcvrd_mst_node - write the recovered master node.
 * @c: UBIFS file-system description object
 *
 * This function writes the master node that was recovered during mounting in
 * read-only mode and must now be written because we are remounting rw.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_write_rcvrd_mst_node(struct ubifs_info *c)
{
	int err;

	if (!c->rcvrd_mst_node)
		return 0;
	c->rcvrd_mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
	err = write_rcvrd_mst_node(c, c->rcvrd_mst_node);
	if (err)
		return err;
	kfree(c->rcvrd_mst_node);
	c->rcvrd_mst_node = NULL;
	return 0;
}

/**
 * is_last_write - determine if an offset was in the last write to a LEB.
 * @c: UBIFS file-system description object
 * @buf: buffer to check
 * @offs: offset to check
 *
 * This function returns %1 if @offs was in the last write to the LEB whose data
 * is in @buf, otherwise %0 is returned. The determination is made by checking
 * for subsequent empty space starting from the next min_io_size boundary (or a
 * bit less than the common header size if min_io_size is one).
 */
static int is_last_write(const struct ubifs_info *c, void *buf, int offs)
{
	int empty_offs;
	int check_len;
	uint8_t *p;

	if (c->min_io_size == 1) {
		check_len = c->leb_size - offs;
		p = buf + check_len;
		for (; check_len > 0; check_len--)
			if (*--p != 0xff)
				break;
		/*
		 * 'check_len' is the size of the corruption which cannot be
		 * more than the size of 1 node if it was caused by an unclean
		 * unmount.
		 */
		if (check_len > UBIFS_MAX_NODE_SZ)
			return 0;
		return 1;
	}

	/*
	 * Round up to the next c->min_io_size boundary i.e. 'offs' is in the
	 * last wbuf written. After that should be empty space.
	 */
	empty_offs = ALIGN(offs + 1, c->min_io_size);
	check_len = c->leb_size - empty_offs;
	p = buf + empty_offs - offs;

	for (; check_len > 0; check_len--)
		if (*p++ != 0xff)
			return 0;
	return 1;
}

/**
 * clean_buf - clean the data from an LEB sitting in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer to clean
 * @lnum: LEB number to clean
 * @offs: offset from which to clean
 * @len: length of buffer
 *
 * This function pads up to the next min_io_size boundary (if there is one) and
 * sets empty space to all 0xff. @buf, @offs and @len are updated to the next
 * min_io_size boundary (if there is one).
 */
static void clean_buf(const struct ubifs_info *c, void **buf, int lnum,
		      int *offs, int *len)
{
	int empty_offs, pad_len;
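
	/*
	 * @lnum is used only by the debug message below; the self-assignment
	 * that follows appears to be there just to avoid an "unused
	 * parameter" warning when debug messages are compiled out.
	 */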
	lnum = lnum;
	dbg_rcvry("cleaning corruption at %d:%d", lnum, *offs);

	if (c->min_io_size == 1) {
		memset(*buf, 0xff, c->leb_size - *offs);
		return;
	}

	ubifs_assert(!(*offs & 7));
	empty_offs = ALIGN(*offs, c->min_io_size);
	pad_len = empty_offs - *offs;
	ubifs_pad(c, *buf, pad_len);
	*offs += pad_len;
	*buf += pad_len;
	*len -= pad_len;
	memset(*buf, 0xff, c->leb_size - empty_offs);
}

/**
 * no_more_nodes - determine if there are no more nodes in a buffer.
 * @c: UBIFS file-system description object
 * @buf: buffer to check
 * @len: length of buffer
 * @lnum: LEB number of the LEB from which @buf was read
 * @offs: offset from which @buf was read
 *
 * This function scans @buf for more nodes and returns %0 if a node is found and
 * %1 if no more nodes are found.
 */
static int no_more_nodes(const struct ubifs_info *c, void *buf, int len,
			 int lnum, int offs)
{
	int skip, next_offs = 0;

	if (len > UBIFS_DATA_NODE_SZ) {
		struct ubifs_ch *ch = buf;
		int dlen = le32_to_cpu(ch->len);

		if (ch->node_type == UBIFS_DATA_NODE && dlen >= UBIFS_CH_SZ &&
		    dlen <= UBIFS_MAX_DATA_NODE_SZ)
			/* The corrupt node looks like a data node */
			next_offs = ALIGN(offs + dlen, 8);
	}
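
	/*
	 * Skip past the write unit that contains the corrupt node: 8 bytes
	 * when min_io_size is 1, otherwise up to the next min_io_size
	 * boundary. If anything after that still looks like a valid node,
	 * the corruption was not simply an interrupted last write.
	 */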
	if (c->min_io_size == 1)
		skip = 8;
	else
		skip = ALIGN(offs + 1, c->min_io_size) - offs;

	offs += skip;
	buf += skip;
	len -= skip;
	while (len > 8) {
		struct ubifs_ch *ch = buf;
		uint32_t magic = le32_to_cpu(ch->magic);
		int ret;

		if (magic == UBIFS_NODE_MAGIC) {
			ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
			if (ret == SCANNED_A_NODE || ret > 0) {
				/*
				 * There is a small chance this is just data in
				 * a data node, so check that possibility. e.g.
				 * this is part of a file that itself contains
				 * a UBIFS image.
				 */
				if (next_offs && offs + le32_to_cpu(ch->len) <=
				    next_offs)
					continue;
				dbg_rcvry("unexpected node at %d:%d", lnum,
					  offs);
				return 0;
			}
		}
		offs += 8;
		buf += 8;
		len -= 8;
	}
	return 1;
}

/**
 * fix_unclean_leb - fix an unclean LEB.
 * @c: UBIFS file-system description object
 * @sleb: scanned LEB information
 * @start: offset where scan started
 */
static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
			   int start)
{
	int lnum = sleb->lnum, endpt = start;

	/* Get the end offset of the last node we are keeping */
	if (!list_empty(&sleb->nodes)) {
		struct ubifs_scan_node *snod;

		snod = list_entry(sleb->nodes.prev,
				  struct ubifs_scan_node, list);
		endpt = snod->offs + snod->len;
	}

	if ((c->vfs_sb->s_flags & MS_RDONLY) && !c->remounting_rw) {
		/* Add to recovery list */
		struct ubifs_unclean_leb *ucleb;

		dbg_rcvry("need to fix LEB %d start %d endpt %d",
			  lnum, start, sleb->endpt);
		ucleb = kzalloc(sizeof(struct ubifs_unclean_leb), GFP_NOFS);
		if (!ucleb)
			return -ENOMEM;
		ucleb->lnum = lnum;
		ucleb->endpt = endpt;
		list_add_tail(&ucleb->list, &c->unclean_leb_list);
	} else {
		/* Write the fixed LEB back to flash */
		int err;

		dbg_rcvry("fixing LEB %d start %d endpt %d",
			  lnum, start, sleb->endpt);
		if (endpt == 0) {
			err = ubifs_leb_unmap(c, lnum);
			if (err)
				return err;
		} else {
			int len = ALIGN(endpt, c->min_io_size);

			if (start) {
				err = ubi_read(c->ubi, lnum, sleb->buf, 0,
					       start);
				if (err)
					return err;
			}
			/* Pad to min_io_size */
			if (len > endpt) {
				int pad_len = len - ALIGN(endpt, 8);

				if (pad_len > 0) {
					void *buf = sleb->buf + len - pad_len;

					ubifs_pad(c, buf, pad_len);
				}
			}
			err = ubi_leb_change(c->ubi, lnum, sleb->buf, len,
					     UBI_UNKNOWN);
			if (err)
				return err;
		}
	}
	return 0;
}

/**
 * drop_incomplete_group - drop nodes from an incomplete group.
 * @sleb: scanned LEB information
 * @offs: offset of dropped nodes is returned here
 *
 * This function returns %1 if nodes are dropped and %0 otherwise.
 */
static int drop_incomplete_group(struct ubifs_scan_leb *sleb, int *offs)
{
	int dropped = 0;

	while (!list_empty(&sleb->nodes)) {
		struct ubifs_scan_node *snod;
		struct ubifs_ch *ch;

		snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
				  list);
		ch = snod->node;
		if (ch->group_type != UBIFS_IN_NODE_GROUP)
			return dropped;
		dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs);
		*offs = snod->offs;
		list_del(&snod->list);
		kfree(snod);
		sleb->nodes_cnt -= 1;
		dropped = 1;
	}
	return dropped;
}

/**
 * ubifs_recover_leb - scan and recover a LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @offs: offset
 * @sbuf: LEB-sized buffer to use
 * @grouped: nodes may be grouped for recovery
 *
 * This function does a scan of a LEB, but caters for errors that might have
 * been caused by the unclean unmount from which we are attempting to recover.
 *
 * This function returns the scanned information on success and a negative
 * error code on failure.
 */
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
					 int offs, void *sbuf, int grouped)
{
	int err, len = c->leb_size - offs, need_clean = 0, quiet = 1;
	int empty_chkd = 0, start = offs;
	struct ubifs_scan_leb *sleb;
	void *buf = sbuf + offs;

	dbg_rcvry("%d:%d", lnum, offs);

	sleb = ubifs_start_scan(c, lnum, offs, sbuf);
	if (IS_ERR(sleb))
		return sleb;

	if (sleb->ecc)
		need_clean = 1;

	while (len >= 8) {
		int ret;

		dbg_scan("look at LEB %d:%d (%d bytes left)",
			 lnum, offs, len);

		cond_resched();

		/*
		 * Scan quietly until there is an error from which we cannot
		 * recover
		 */
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);

		if (ret == SCANNED_A_NODE) {
			/* A valid node, and not a padding node */
			struct ubifs_ch *ch = buf;
			int node_len;

			err = ubifs_add_snod(c, sleb, buf, offs);
			if (err)
				goto error;
			node_len = ALIGN(le32_to_cpu(ch->len), 8);
			offs += node_len;
			buf += node_len;
			len -= node_len;
			continue;
		}

		if (ret > 0) {
			/* Padding bytes or a valid padding node */
			offs += ret;
			buf += ret;
			len -= ret;
			continue;
		}

		if (ret == SCANNED_EMPTY_SPACE) {
			if (!is_empty(buf, len)) {
				if (!is_last_write(c, buf, offs))
					break;
				clean_buf(c, &buf, lnum, &offs, &len);
				need_clean = 1;
			}
			empty_chkd = 1;
			break;
		}

		if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE)
			if (is_last_write(c, buf, offs)) {
				clean_buf(c, &buf, lnum, &offs, &len);
				need_clean = 1;
				empty_chkd = 1;
				break;
			}

		if (ret == SCANNED_A_CORRUPT_NODE)
			if (no_more_nodes(c, buf, len, lnum, offs)) {
				clean_buf(c, &buf, lnum, &offs, &len);
				need_clean = 1;
				empty_chkd = 1;
				break;
			}

		if (quiet) {
			/* Redo the last scan but noisily */
			quiet = 0;
			continue;
		}

		switch (ret) {
		case SCANNED_GARBAGE:
			dbg_err("garbage");
			goto corrupted;
		case SCANNED_A_CORRUPT_NODE:
		case SCANNED_A_BAD_PAD_NODE:
			dbg_err("bad node");
			goto corrupted;
		default:
			dbg_err("unknown");
			goto corrupted;
		}
	}

	if (!empty_chkd && !is_empty(buf, len)) {
		if (is_last_write(c, buf, offs)) {
			clean_buf(c, &buf, lnum, &offs, &len);
			need_clean = 1;
		} else {
			ubifs_err("corrupt empty space at LEB %d:%d",
				  lnum, offs);
			goto corrupted;
		}
	}

	/* Drop nodes from incomplete group */
	if (grouped && drop_incomplete_group(sleb, &offs)) {
		buf = sbuf + offs;
		len = c->leb_size - offs;
		clean_buf(c, &buf, lnum, &offs, &len);
		need_clean = 1;
	}

	if (offs % c->min_io_size) {
		clean_buf(c, &buf, lnum, &offs, &len);
		need_clean = 1;
	}

	ubifs_end_scan(c, sleb, lnum, offs);

	if (need_clean) {
		err = fix_unclean_leb(c, sleb, start);
		if (err)
			goto error;
	}

	return sleb;

corrupted:
	ubifs_scanned_corruption(c, lnum, offs, buf);
	err = -EUCLEAN;
error:
	ubifs_err("LEB %d scanning failed", lnum);
	ubifs_scan_destroy(sleb);
	return ERR_PTR(err);
}

/**
 * get_cs_sqnum - get commit start sequence number.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of commit start node
 * @offs: offset of commit start node
 * @cs_sqnum: commit start sequence number is returned here
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
			unsigned long long *cs_sqnum)
{
	struct ubifs_cs_node *cs_node = NULL;
	int err, ret;

	dbg_rcvry("at %d:%d", lnum, offs);
	cs_node = kmalloc(UBIFS_CS_NODE_SZ, GFP_KERNEL);
	if (!cs_node)
		return -ENOMEM;
	if (c->leb_size - offs < UBIFS_CS_NODE_SZ)
		goto out_err;
	err = ubi_read(c->ubi, lnum, (void *)cs_node, offs, UBIFS_CS_NODE_SZ);
	if (err && err != -EBADMSG)
		goto out_free;
	ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0);
	if (ret != SCANNED_A_NODE) {
		dbg_err("Not a valid node");
		goto out_err;
	}
	if (cs_node->ch.node_type != UBIFS_CS_NODE) {
		dbg_err("Not a CS node, type is %d", cs_node->ch.node_type);
		goto out_err;
	}
	if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) {
		dbg_err("CS node cmt_no %llu != current cmt_no %llu",
			(unsigned long long)le64_to_cpu(cs_node->cmt_no),
			c->cmt_no);
		goto out_err;
	}
	*cs_sqnum = le64_to_cpu(cs_node->ch.sqnum);
	dbg_rcvry("commit start sqnum %llu", *cs_sqnum);
	kfree(cs_node);
	return 0;

out_err:
	err = -EINVAL;
out_free:
	ubifs_err("failed to get CS sqnum");
	kfree(cs_node);
	return err;
}

/**
 * ubifs_recover_log_leb - scan and recover a log LEB.
 * @c: UBIFS file-system description object
 * @lnum: LEB number
 * @offs: offset
 * @sbuf: LEB-sized buffer to use
 *
 * This function does a scan of a LEB, but caters for errors that might have
 * been caused by the unclean unmount from which we are attempting to recover.
 *
 * This function returns the scanned information on success and a negative
 * error code on failure.
 */
struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
					     int offs, void *sbuf)
{
	struct ubifs_scan_leb *sleb;
	int next_lnum;

	dbg_rcvry("LEB %d", lnum);
	next_lnum = lnum + 1;
	if (next_lnum >= UBIFS_LOG_LNUM + c->log_lebs)
		next_lnum = UBIFS_LOG_LNUM;
	if (next_lnum != c->ltail_lnum) {
		/*
		 * We can only recover at the end of the log, so check that the
		 * next log LEB is empty or out of date.
		 */
		sleb = ubifs_scan(c, next_lnum, 0, sbuf);
		if (IS_ERR(sleb))
			return sleb;
		if (sleb->nodes_cnt) {
			struct ubifs_scan_node *snod;
			unsigned long long cs_sqnum = c->cs_sqnum;

			snod = list_entry(sleb->nodes.next,
					  struct ubifs_scan_node, list);
			if (cs_sqnum == 0) {
				int err;

				err = get_cs_sqnum(c, lnum, offs, &cs_sqnum);
				if (err) {
					ubifs_scan_destroy(sleb);
					return ERR_PTR(err);
				}
			}
			if (snod->sqnum > cs_sqnum) {
				ubifs_err("unrecoverable log corruption "
					  "in LEB %d", lnum);
				ubifs_scan_destroy(sleb);
				return ERR_PTR(-EUCLEAN);
			}
		}
		ubifs_scan_destroy(sleb);
	}
	return ubifs_recover_leb(c, lnum, offs, sbuf, 0);
}

/**
 * recover_head - recover a head.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of head to recover
 * @offs: offset of head to recover
 * @sbuf: LEB-sized buffer to use
 *
 * This function ensures that there is no data on the flash at a head location.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int recover_head(const struct ubifs_info *c, int lnum, int offs,
			void *sbuf)
{
	int len, err, need_clean = 0;

	if (c->min_io_size > 1)
		len = c->min_io_size;
	else
		len = 512;
	if (offs + len > c->leb_size)
		len = c->leb_size - offs;

	if (!len)
		return 0;

	/* Read at the head location and check it is empty flash */
	err = ubi_read(c->ubi, lnum, sbuf, offs, len);
	if (err)
		need_clean = 1;
	else {
		uint8_t *p = sbuf;

		while (len--)
			if (*p++ != 0xff) {
				need_clean = 1;
				break;
			}
	}

	if (need_clean) {
		dbg_rcvry("cleaning head at %d:%d", lnum, offs);
		if (offs == 0)
			return ubifs_leb_unmap(c, lnum);
		err = ubi_read(c->ubi, lnum, sbuf, 0, offs);
		if (err)
			return err;
		return ubi_leb_change(c->ubi, lnum, sbuf, offs, UBI_UNKNOWN);
	}

	return 0;
}

/**
 * ubifs_recover_inl_heads - recover index and LPT heads.
 * @c: UBIFS file-system description object
 * @sbuf: LEB-sized buffer to use
 *
 * This function ensures that there is no data on the flash at the index and
 * LPT head locations.
 *
 * This deals with the recovery of a half-completed journal commit. UBIFS is
 * careful never to overwrite the last version of the index or the LPT. Because
 * the index and LPT are wandering trees, data from a half-completed commit will
 * not be referenced anywhere in UBIFS. The data will be either in LEBs that are
 * assumed to be empty and will be unmapped anyway before use, or in the index
 * and LPT heads.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf)
{
	int err;

	ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY) || c->remounting_rw);

	dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs);
	err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf);
	if (err)
		return err;

	dbg_rcvry("checking LPT head at %d:%d", c->nhead_lnum, c->nhead_offs);
	err = recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf);
	if (err)
		return err;

	return 0;
}

/**
 * clean_an_unclean_leb - read and write a LEB to remove corruption.
 * @c: UBIFS file-system description object
 * @ucleb: unclean LEB information
 * @sbuf: LEB-sized buffer to use
 *
 * This function reads a LEB up to a point pre-determined by the mount recovery,
 * checks the nodes, and writes the result back to the flash, thereby cleaning
 * off any following corruption, or non-fatal ECC errors.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int clean_an_unclean_leb(const struct ubifs_info *c,
				struct ubifs_unclean_leb *ucleb, void *sbuf)
{
	int err, lnum = ucleb->lnum, offs = 0, len = ucleb->endpt, quiet = 1;
	void *buf = sbuf;

	dbg_rcvry("LEB %d len %d", lnum, len);

	if (len == 0) {
		/* Nothing to read, just unmap it */
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
		return 0;
	}

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	if (err && err != -EBADMSG)
		return err;

	while (len >= 8) {
		int ret;

		cond_resched();

		/* Scan quietly until there is an error */
		ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);

		if (ret == SCANNED_A_NODE) {
			/* A valid node, and not a padding node */
			struct ubifs_ch *ch = buf;
			int node_len;

			node_len = ALIGN(le32_to_cpu(ch->len), 8);
			offs += node_len;
			buf += node_len;
			len -= node_len;
			continue;
		}

		if (ret > 0) {
			/* Padding bytes or a valid padding node */
			offs += ret;
			buf += ret;
			len -= ret;
			continue;
		}

		if (ret == SCANNED_EMPTY_SPACE) {
			ubifs_err("unexpected empty space at %d:%d",
				  lnum, offs);
			return -EUCLEAN;
		}

		if (quiet) {
			/* Redo the last scan but noisily */
			quiet = 0;
			continue;
		}

		ubifs_scanned_corruption(c, lnum, offs, buf);
		return -EUCLEAN;
	}

	/* Pad to min_io_size */
	len = ALIGN(ucleb->endpt, c->min_io_size);
	if (len > ucleb->endpt) {
		int pad_len = len - ALIGN(ucleb->endpt, 8);

		if (pad_len > 0) {
			buf = c->sbuf + len - pad_len;
			ubifs_pad(c, buf, pad_len);
		}
	}

	/* Write back the LEB atomically */
	err = ubi_leb_change(c->ubi, lnum, sbuf, len, UBI_UNKNOWN);
	if (err)
		return err;

	dbg_rcvry("cleaned LEB %d", lnum);

	return 0;
}

/**
 * ubifs_clean_lebs - clean LEBs recovered during read-only mount.
 * @c: UBIFS file-system description object
 * @sbuf: LEB-sized buffer to use
 *
 * This function cleans a LEB identified during recovery that needs to be
 * written but was not because UBIFS was mounted read-only. This happens when
 * remounting to read-write mode.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_clean_lebs(const struct ubifs_info *c, void *sbuf)
{
	dbg_rcvry("recovery");
	while (!list_empty(&c->unclean_leb_list)) {
		struct ubifs_unclean_leb *ucleb;
		int err;

		ucleb = list_entry(c->unclean_leb_list.next,
				   struct ubifs_unclean_leb, list);
		err = clean_an_unclean_leb(c, ucleb, sbuf);
		if (err)
			return err;
		list_del(&ucleb->list);
		kfree(ucleb);
	}
	return 0;
}

/**
 * ubifs_rcvry_gc_commit - recover the GC LEB number and run the commit.
 * @c: UBIFS file-system description object
 *
 * Out-of-place garbage collection always requires one empty LEB with which to
 * start garbage collection. The LEB number is recorded in c->gc_lnum and is
 * written to the master node on unmounting. In the case of an unclean unmount
 * the value of gc_lnum recorded in the master node is out of date and cannot
 * be used. Instead, recovery must allocate an empty LEB for this purpose.
 * However, there may not be enough empty space, in which case it must be
 * possible to GC the dirtiest LEB into the GC head LEB.
 *
 * This function also runs the commit which causes the TNC updates from
 * size-recovery and orphans to be written to the flash. That is important to
 * ensure correct replay order for subsequent mounts.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_rcvry_gc_commit(struct ubifs_info *c)
{
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	struct ubifs_lprops lp;
	int lnum, err;
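
	/*
	 * The gc_lnum recorded in the master node is out of date after an
	 * unclean unmount (see the comment above), so invalidate it here and
	 * pick a fresh LEB below.
	 */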
	c->gc_lnum = -1;
	if (wbuf->lnum == -1) {
		dbg_rcvry("no GC head LEB");
		goto find_free;
	}
	/*
	 * See whether the used space in the dirtiest LEB fits in the GC head
	 * LEB.
	 */
	if (wbuf->offs == c->leb_size) {
		dbg_rcvry("no room in GC head LEB");
		goto find_free;
	}
	err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2);
	if (err) {
		if (err == -ENOSPC)
			dbg_err("could not find a dirty LEB");
		return err;
	}
	ubifs_assert(!(lp.flags & LPROPS_INDEX));
	lnum = lp.lnum;
	if (lp.free + lp.dirty == c->leb_size) {
		/* An empty LEB was returned */
		if (lp.free != c->leb_size) {
			err = ubifs_change_one_lp(c, lnum, c->leb_size,
						  0, 0, 0, 0);
			if (err)
				return err;
		}
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
		c->gc_lnum = lnum;
		dbg_rcvry("allocated LEB %d for GC", lnum);
		/* Run the commit */
		dbg_rcvry("committing");
		return ubifs_run_commit(c);
	}
	/*
	 * There was no empty LEB so the used space in the dirtiest LEB must fit
	 * in the GC head LEB.
	 */
	if (lp.free + lp.dirty < wbuf->offs) {
		dbg_rcvry("LEB %d doesn't fit in GC head LEB %d:%d",
			  lnum, wbuf->lnum, wbuf->offs);
		err = ubifs_return_leb(c, lnum);
		if (err)
			return err;
		goto find_free;
	}
	/*
	 * We run the commit before garbage collection otherwise subsequent
	 * mounts will see the GC and orphan deletion in a different order.
	 */
	dbg_rcvry("committing");
	err = ubifs_run_commit(c);
	if (err)
		return err;
	/*
	 * The data in the dirtiest LEB fits in the GC head LEB, so do the GC
	 * - use locking to keep 'ubifs_assert()' happy.
	 */
	dbg_rcvry("GC'ing LEB %d", lnum);
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	err = ubifs_garbage_collect_leb(c, &lp);
	if (err >= 0) {
		int err2 = ubifs_wbuf_sync_nolock(wbuf);

		if (err2)
			err = err2;
	}
	mutex_unlock(&wbuf->io_mutex);
	if (err < 0) {
		dbg_err("GC failed, error %d", err);
		if (err == -EAGAIN)
			err = -EINVAL;
		return err;
	}
	if (err != LEB_RETAINED) {
		dbg_err("GC returned %d", err);
		return -EINVAL;
	}
	err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err)
		return err;
	dbg_rcvry("allocated LEB %d for GC", lnum);
	return 0;

find_free:
	/*
	 * There is no GC head LEB or the free space in the GC head LEB is too
	 * small. Allocate gc_lnum by calling 'ubifs_find_free_leb_for_idx()' so
	 * GC is not run.
	 */
	lnum = ubifs_find_free_leb_for_idx(c);
	if (lnum < 0) {
		dbg_err("could not find an empty LEB");
		return lnum;
	}
	/* And reset the index flag */
	err = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
				  LPROPS_INDEX, 0);
	if (err)
		return err;
	c->gc_lnum = lnum;
	dbg_rcvry("allocated LEB %d for GC", lnum);
	/* Run the commit */
	dbg_rcvry("committing");
	return ubifs_run_commit(c);
}

/**
 * struct size_entry - inode size information for recovery.
 * @rb: link in the RB-tree of sizes
 * @inum: inode number
 * @i_size: size on inode
 * @d_size: maximum size based on data nodes
 * @exists: indicates whether the inode exists
 * @inode: inode if pinned in memory awaiting rw mode to fix it
 */
struct size_entry {
	struct rb_node rb;
	ino_t inum;
	loff_t i_size;
	loff_t d_size;
	int exists;
	struct inode *inode;
};

/**
 * add_ino - add an entry to the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 * @i_size: size on inode
 * @d_size: maximum size based on data nodes
 * @exists: indicates whether the inode exists
 */
static int add_ino(struct ubifs_info *c, ino_t inum, loff_t i_size,
		   loff_t d_size, int exists)
{
	struct rb_node **p = &c->size_tree.rb_node, *parent = NULL;
	struct size_entry *e;

	while (*p) {
		parent = *p;
		e = rb_entry(parent, struct size_entry, rb);
		if (inum < e->inum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	e = kzalloc(sizeof(struct size_entry), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->inum = inum;
	e->i_size = i_size;
	e->d_size = d_size;
	e->exists = exists;

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, &c->size_tree);

	return 0;
}

/**
 * find_ino - find an entry on the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 */
static struct size_entry *find_ino(struct ubifs_info *c, ino_t inum)
{
	struct rb_node *p = c->size_tree.rb_node;
	struct size_entry *e;

	while (p) {
		e = rb_entry(p, struct size_entry, rb);
		if (inum < e->inum)
			p = p->rb_left;
		else if (inum > e->inum)
			p = p->rb_right;
		else
			return e;
	}
	return NULL;
}

/**
 * remove_ino - remove an entry from the size tree.
 * @c: UBIFS file-system description object
 * @inum: inode number
 */
static void remove_ino(struct ubifs_info *c, ino_t inum)
{
	struct size_entry *e = find_ino(c, inum);

	if (!e)
		return;
	rb_erase(&e->rb, &c->size_tree);
	kfree(e);
}

/**
 * ubifs_destroy_size_tree - free resources related to the size tree.
 * @c: UBIFS file-system description object
 */
void ubifs_destroy_size_tree(struct ubifs_info *c)
{
	struct rb_node *this = c->size_tree.rb_node;
	struct size_entry *e;
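
	/*
	 * Walk the tree bottom-up and detach each node from its parent by
	 * hand rather than calling rb_erase(), since the whole tree is being
	 * destroyed anyway.
	 */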
	while (this) {
		if (this->rb_left) {
			this = this->rb_left;
			continue;
		} else if (this->rb_right) {
			this = this->rb_right;
			continue;
		}
		e = rb_entry(this, struct size_entry, rb);
		if (e->inode)
			iput(e->inode);
		this = rb_parent(this);
		if (this) {
			if (this->rb_left == &e->rb)
				this->rb_left = NULL;
			else
				this->rb_right = NULL;
		}
		kfree(e);
	}
	c->size_tree = RB_ROOT;
}

/**
 * ubifs_recover_size_accum - accumulate inode sizes for recovery.
 * @c: UBIFS file-system description object
 * @key: node key
 * @deletion: node is for a deletion
 * @new_size: inode size
 *
 * This function has two purposes:
 *     1) to ensure there are no data nodes that fall outside the inode size
 *     2) to ensure there are no data nodes for inodes that do not exist
 * To accomplish those purposes, a rb-tree is constructed containing an entry
 * for each inode number in the journal that has not been deleted, and recording
 * the size from the inode node, the maximum size of any data node (also altered
 * by truncations) and a flag indicating an inode number for which no inode node
 * was present in the journal.
 *
 * Note that there is still the possibility that there are data nodes that have
 * been committed that are beyond the inode size, however the only way to find
 * them would be to scan the entire index. Alternatively, some provision could
 * be made to record the size of inodes at the start of commit, which would seem
 * very cumbersome for a scenario that is quite unlikely and the only negative
 * consequence of which is wasted space.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key,
			     int deletion, loff_t new_size)
{
	ino_t inum = key_inum(c, key);
	struct size_entry *e;
	int err;

	switch (key_type(c, key)) {
	case UBIFS_INO_KEY:
		if (deletion)
			remove_ino(c, inum);
		else {
			e = find_ino(c, inum);
			if (e) {
				e->i_size = new_size;
				e->exists = 1;
			} else {
				err = add_ino(c, inum, new_size, 0, 1);
				if (err)
					return err;
			}
		}
		break;
	case UBIFS_DATA_KEY:
		e = find_ino(c, inum);
		if (e) {
			if (new_size > e->d_size)
				e->d_size = new_size;
		} else {
			err = add_ino(c, inum, 0, new_size, 0);
			if (err)
				return err;
		}
		break;
	case UBIFS_TRUN_KEY:
		e = find_ino(c, inum);
		if (e)
			e->d_size = new_size;
		break;
	}
	return 0;
}

/**
 * fix_size_in_place - fix inode size in place on flash.
 * @c: UBIFS file-system description object
 * @e: inode size information for recovery
 */
static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
{
	struct ubifs_ino_node *ino = c->sbuf;
	unsigned char *p;
	union ubifs_key key;
	int err, lnum, offs, len;
	loff_t i_size;
	uint32_t crc;

	/* Locate the inode node LEB number and offset */
	ino_key_init(c, &key, e->inum);
	err = ubifs_tnc_locate(c, &key, ino, &lnum, &offs);
	if (err)
		goto out;
	/*
	 * If the size recorded on the inode node is greater than the size that
	 * was calculated from nodes in the journal then don't change the inode.
	 */
	i_size = le64_to_cpu(ino->size);
	if (i_size >= e->d_size)
		return 0;
	/* Read the LEB */
	err = ubi_read(c->ubi, lnum, c->sbuf, 0, c->leb_size);
	if (err)
		goto out;
	/* Change the size field and recalculate the CRC */
	ino = c->sbuf + offs;
	ino->size = cpu_to_le64(e->d_size);
	len = le32_to_cpu(ino->ch.len);
	crc = crc32(UBIFS_CRC32_INIT, (void *)ino + 8, len - 8);
	ino->ch.crc = cpu_to_le32(crc);
	/* Work out where data in the LEB ends and free space begins */
	p = c->sbuf;
	len = c->leb_size - 1;
	while (p[len] == 0xff)
		len -= 1;
	len = ALIGN(len + 1, c->min_io_size);
	/* Atomically write the fixed LEB back again */
	err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
	if (err)
		goto out;
	dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ",
		  (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
	return 0;

out:
	ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d",
		   (unsigned long)e->inum, e->i_size, e->d_size, err);
	return err;
}

/**
 * ubifs_recover_size - recover inode size.
 * @c: UBIFS file-system description object
 *
 * This function attempts to fix inode size discrepancies identified by the
 * 'ubifs_recover_size_accum()' function.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_recover_size(struct ubifs_info *c)
{
	struct rb_node *this = rb_first(&c->size_tree);

	while (this) {
		struct size_entry *e;
		int err;

		e = rb_entry(this, struct size_entry, rb);
		if (!e->exists) {
			union ubifs_key key;

			ino_key_init(c, &key, e->inum);
			err = ubifs_tnc_lookup(c, &key, c->sbuf);
			if (err && err != -ENOENT)
				return err;
			if (err == -ENOENT) {
				/* Remove data nodes that have no inode */
				dbg_rcvry("removing ino %lu",
					  (unsigned long)e->inum);
				err = ubifs_tnc_remove_ino(c, e->inum);
				if (err)
					return err;
			} else {
				struct ubifs_ino_node *ino = c->sbuf;

				e->exists = 1;
				e->i_size = le64_to_cpu(ino->size);
			}
		}
		if (e->exists && e->i_size < e->d_size) {
			if (!e->inode && (c->vfs_sb->s_flags & MS_RDONLY)) {
				/* Fix the inode size and pin it in memory */
				struct inode *inode;

				inode = ubifs_iget(c->vfs_sb, e->inum);
				if (IS_ERR(inode))
					return PTR_ERR(inode);
				if (inode->i_size < e->d_size) {
					dbg_rcvry("ino %lu size %lld -> %lld",
						  (unsigned long)e->inum,
						  e->d_size, inode->i_size);
					inode->i_size = e->d_size;
					ubifs_inode(inode)->ui_size = e->d_size;
					e->inode = inode;
					this = rb_next(this);
					continue;
				}
				iput(inode);
			} else {
				/* Fix the size in place */
				err = fix_size_in_place(c, e);
				if (err)
					return err;
				if (e->inode)
					iput(e->inode);
			}
		}
		this = rb_next(this);
		rb_erase(&e->rb, &c->size_tree);
		kfree(e);
	}
	return 0;
}