/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * The UBI Eraseblock Association (EBA) unit.
 *
 * This unit is responsible for I/O to/from logical eraseblocks.
 *
 * Although in this implementation the EBA table is fully kept and managed in
 * RAM, which implies poor scalability, it might be (partially) maintained on
 * flash in future implementations.
 *
 * The EBA unit implements per-logical eraseblock locking. Before accessing a
 * logical eraseblock it is locked for reading or writing. The per-logical
 * eraseblock locking is implemented by means of the lock tree. The lock tree
 * is an RB-tree which refers all the currently locked logical eraseblocks. The
 * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
 * (@vol_id, @lnum) pairs.
 *
 * EBA also maintains the global sequence counter which is incremented each
 * time a logical eraseblock is mapped to a physical eraseblock and it is
 * stored in the volume identifier header. This means that each VID header has
 * a unique sequence number. The sequence number is only increased and we
 * assume 64 bits is enough for it to never overflow.
 */

#ifdef UBI_LINUX
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/err.h>
#endif

#include <ubi_uboot.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1

/**
 * next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns next sequence number to use, which is just the current
 * global sequence counter value. It also increases the global sequence
 * counter.
 */
static unsigned long long next_sqnum(struct ubi_device *ubi)
{
	unsigned long long sqnum;

	spin_lock(&ubi->ltree_lock);
	sqnum = ubi->global_sqnum++;
	spin_unlock(&ubi->ltree_lock);

	return sqnum;
}
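
/*
 * Note: the global sequence counter is serialized by re-using
 * @ubi->ltree_lock rather than a dedicated spinlock, so every increment is
 * atomic with respect to lock-tree operations as well.
 */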

/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * This function returns compatibility flags for an internal volume. User
 * volumes have no compatibility flags, so %0 is returned.
 */
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
	if (vol_id == UBI_LAYOUT_VOLUME_ID)
		return UBI_LAYOUT_VOLUME_COMPAT;
	return 0;
}

/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
 * object if the logical eraseblock is locked and %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
					    int lnum)
{
	struct rb_node *p;

	p = ubi->ltree.rb_node;
	while (p) {
		struct ubi_ltree_entry *le;

		le = rb_entry(p, struct ubi_ltree_entry, rb);

		if (vol_id < le->vol_id)
			p = p->rb_left;
		else if (vol_id > le->vol_id)
			p = p->rb_right;
		else {
			if (lnum < le->lnum)
				p = p->rb_left;
			else if (lnum > le->lnum)
				p = p->rb_right;
			else
				return le;
		}
	}

	return NULL;
}
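
/*
 * Both ltree_lookup() and the insertion loop in ltree_add_entry() below
 * compare @vol_id first and @lnum second, so every (@vol_id, @lnum) pair has
 * exactly one possible position in the RB-tree and lookups and inserts agree
 * on the ordering.
 */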

/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
 * lock tree. If such entry is already there, its usage counter is increased.
 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
 * failed.
 */
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
					       int vol_id, int lnum)
{
	struct ubi_ltree_entry *le, *le1, *le_free;

	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	le->users = 0;
	init_rwsem(&le->mutex);
	le->vol_id = vol_id;
	le->lnum = lnum;

	spin_lock(&ubi->ltree_lock);
	le1 = ltree_lookup(ubi, vol_id, lnum);

	if (le1) {
		/*
		 * This logical eraseblock is already locked. The newly
		 * allocated lock entry is not needed.
		 */
		le_free = le;
		le = le1;
	} else {
		struct rb_node **p, *parent = NULL;

		/*
		 * No lock entry, add the newly allocated one to the
		 * @ubi->ltree RB-tree.
		 */
		le_free = NULL;

		p = &ubi->ltree.rb_node;
		while (*p) {
			parent = *p;
			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);

			if (vol_id < le1->vol_id)
				p = &(*p)->rb_left;
			else if (vol_id > le1->vol_id)
				p = &(*p)->rb_right;
			else {
				ubi_assert(lnum != le1->lnum);
				if (lnum < le1->lnum)
					p = &(*p)->rb_left;
				else
					p = &(*p)->rb_right;
			}
		}

		rb_link_node(&le->rb, parent, p);
		rb_insert_color(&le->rb, &ubi->ltree);
	}
	le->users += 1;
	spin_unlock(&ubi->ltree_lock);

	if (le_free)
		kfree(le_free);

	return le;
}
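
/*
 * The entry is allocated before @ubi->ltree_lock is taken because the
 * GFP_NOFS allocation may sleep and therefore cannot happen under the
 * spinlock. If another task won the race and the LEB is already in the tree,
 * the redundant allocation is simply freed after the lock is dropped.
 */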

/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for reading. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_read(&le->mutex);
	return 0;
}

/**
 * leb_read_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	int _free = 0;
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		_free = 1;
	}
	spin_unlock(&ubi->ltree_lock);

	up_read(&le->mutex);
	if (_free)
		kfree(le);
}

/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_write(&le->mutex);
	return 0;
}

/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
	int _free;
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	if (down_write_trylock(&le->mutex))
		return 0;

	/* Contention, cancel */
	spin_lock(&ubi->ltree_lock);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		_free = 1;
	} else
		_free = 0;
	spin_unlock(&ubi->ltree_lock);

	if (_free)
		kfree(le);

	return 1;
}

/**
 * leb_write_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	int _free;
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		_free = 1;
	} else
		_free = 0;
	spin_unlock(&ubi->ltree_lock);

	up_write(&le->mutex);
	if (_free)
		kfree(le);
}
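
/*
 * Lock-entry lifecycle: @le->users is incremented in ltree_add_entry() under
 * @ubi->ltree_lock and decremented in the unlock helpers above. The last
 * unlocker removes the entry from the tree while still holding the spinlock,
 * then releases the rwsem and frees the entry, so no other task can find it
 * any more.
 */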

/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules corresponding
 * physical eraseblock for erasure. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
		      int lnum)
{
	int err, pnum, vol_id = vol->vol_id;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0)
		/* This logical eraseblock is already unmapped */
		goto out_unlock;

	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
	err = ubi_wl_put_peb(ubi, pnum, 0);

out_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

/**
 * ubi_eba_read_leb - read data.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: buffer to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
 * bytes. The @check flag only makes sense for static volumes and forces
 * eraseblock data CRC checking.
 *
 * In case of success this function returns zero. In case of a static volume,
 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
 */
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		     void *buf, int offset, int len, int check)
{
	int err, pnum, scrub = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t uninitialized_var(crc);

	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0) {
		/*
		 * The logical eraseblock is not mapped, fill the whole buffer
		 * with 0xFF bytes. The exception is static volumes for which
		 * it is an error to read unmapped logical eraseblocks.
		 */
		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
			len, offset, vol_id, lnum);
		leb_read_unlock(ubi, vol_id, lnum);
		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
		memset(buf, 0xFF, len);
		return 0;
	}

	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
		check = 0;

retry:
	if (check) {
		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
		if (!vid_hdr) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
		if (err && err != UBI_IO_BITFLIPS) {
			if (err > 0) {
				/*
				 * The header is either absent or corrupted.
				 * The former case means there is a bug -
				 * switch to read-only mode just in case.
				 * The latter case means a real corruption - we
				 * may try to recover data. FIXME: but this is
				 * not implemented.
				 */
				if (err == UBI_IO_BAD_VID_HDR) {
					ubi_warn("bad VID header at PEB %d, "
						 "LEB %d:%d", pnum, vol_id,
						 lnum);
					err = -EBADMSG;
				} else
					ubi_ro_mode(ubi);
			}
			goto out_free;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

		crc = be32_to_cpu(vid_hdr->data_crc);
		ubi_free_vid_hdr(ubi, vid_hdr);
	}

	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
	if (err) {
		if (err == UBI_IO_BITFLIPS) {
			scrub = 1;
			err = 0;
		} else if (err == -EBADMSG) {
			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
				goto out_unlock;
			scrub = 1;
			if (!check) {
				ubi_msg("force data checking");
				check = 1;
				goto retry;
			}
		} else
			goto out_unlock;
	}

	if (check) {
		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
		if (crc1 != crc) {
			ubi_warn("CRC error: calculated %#08x, must be %#08x",
				 crc1, crc);
			err = -EBADMSG;
			goto out_unlock;
		}
	}

	if (scrub)
		err = ubi_wl_scrub_peb(ubi, pnum);

	leb_read_unlock(ubi, vol_id, lnum);
	return err;

out_free:
	ubi_free_vid_hdr(ubi, vid_hdr);
out_unlock:
	leb_read_unlock(ubi, vol_id, lnum);
	return err;
}
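
/*
 * Summary of the read path above: for dynamic volumes @check is always forced
 * to 0, so the CRC stored in the VID header is only consulted for static
 * volumes. If ubi_io_read_data() reports an ECC error (-EBADMSG) on a static
 * volume, the read is retried once with checking enabled so that the data CRC
 * decides whether the contents are still valid. Any bit-flip seen along the
 * way sets @scrub and the PEB is handed to ubi_wl_scrub_peb() before
 * returning.
 */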

/**
 * recover_peb - recover from write failure.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to recover
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the failure.
 * Returns new physical eraseblock number in case of success, and a negative
 * error code in case of failure.
 */
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
		       const void *buf, int offset, int len)
{
	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_hdr *vid_hdr;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->buf_mutex);

retry:
	new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
	if (new_pnum < 0) {
		mutex_unlock(&ubi->buf_mutex);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return new_pnum;
	}

	ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);

	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err > 0)
			err = -EIO;
		goto out_put;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
	if (err)
		goto write_error;

	data_size = offset + len;
	memset(ubi->peb_buf1 + offset, 0xFF, len);

	/* Read everything before the area where the write failure happened */
	if (offset > 0) {
		err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
		if (err && err != UBI_IO_BITFLIPS)
			goto out_put;
	}

	memcpy(ubi->peb_buf1 + offset, buf, len);

	err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
	if (err)
		goto write_error;

	mutex_unlock(&ubi->buf_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);

	vol->eba_tbl[lnum] = new_pnum;
	ubi_wl_put_peb(ubi, pnum, 1);

	ubi_msg("data was successfully recovered");
	return 0;

out_put:
	mutex_unlock(&ubi->buf_mutex);
	ubi_wl_put_peb(ubi, new_pnum, 1);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	/*
	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
	 * get another one.
	 */
	ubi_warn("failed to write to PEB %d", new_pnum);
	ubi_wl_put_peb(ubi, new_pnum, 1);
	if (++tries > UBI_IO_RETRIES) {
		mutex_unlock(&ubi->buf_mutex);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}
	ubi_msg("try again");
	goto retry;
}
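
/*
 * How the recovery image is assembled in @ubi->peb_buf1: only bytes
 * [0, @offset) are read back from the failing PEB, the failed region
 * [@offset, @offset + @len) is taken from @buf, and data_size = @offset + @len
 * is all that gets written to the replacement PEB. Anything beyond that range
 * had not been written to the old PEB yet, so it is left unwritten (0xFF) on
 * the new one as well.
 */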

/**
 * ubi_eba_write_leb - write data to dynamic volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: the data to write
 * @offset: offset within the logical eraseblock where to write
 * @len: how many bytes to write
 * @dtype: data type
 *
 * This function writes data to logical eraseblock @lnum of a dynamic volume
 * @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but it may be some garbage.
 */
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		      const void *buf, int offset, int len, int dtype)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum >= 0) {
		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
			len, offset, vol_id, lnum, pnum);

		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn("failed to write data to PEB %d", pnum);
			if (err == -EIO && ubi->bad_allowed)
				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
						  offset, len);
			if (err)
				ubi_ro_mode(ubi);
		}
		leb_write_unlock(ubi, vol_id, lnum);
		return err;
	}

	/*
	 * The logical eraseblock is not mapped. We have to get a free physical
	 * eraseblock and write the volume identifier header there first.
	 */
	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr) {
		leb_write_unlock(ubi, vol_id, lnum);
		return -ENOMEM;
	}

	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	if (len) {
		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn("failed to write %d bytes at offset %d of "
				 "LEB %d:%d, PEB %d", len, offset, vol_id,
				 lnum, pnum);
			goto write_error;
		}
	}

	vol->eba_tbl[lnum] = pnum;

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	/*
	 * Fortunately, this is the first write operation to this physical
	 * eraseblock, so just put it and request a new one. We assume that if
	 * this physical eraseblock went bad, the erase code will handle that.
	 */
	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
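
/*
 * Illustrative call only (not part of this file): a higher layer that has
 * already resolved @vol might write a whole LEB of a dynamic volume with
 *
 *	err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
 *
 * where UBI_UNKNOWN is the "no preference" data type hint that ends up being
 * passed to ubi_wl_get_peb() when a new PEB has to be mapped.
 */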

/**
 * ubi_eba_write_leb_st - write data to static volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @dtype: data type
 * @used_ebs: how many logical eraseblocks will this volume contain
 *
 * This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument should contain the total number of logical
 * eraseblocks in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't have
 * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
 * to the real data size, although the @buf buffer has to contain the
 * alignment. In all other cases, @len has to be aligned.
 *
 * It is prohibited to write more than once to logical eraseblocks of static
 * volumes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
			 int lnum, const void *buf, int len, int dtype,
			 int used_ebs)
{
	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (lnum == used_ebs - 1)
		/* If this is the last LEB @len may be unaligned */
		len = ALIGN(data_size, ubi->min_io_size);
	else
		ubi_assert(!(len & (ubi->min_io_size - 1)));

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, data_size);
	vid_hdr->vol_type = UBI_VID_STATIC;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
		len, vol_id, lnum, pnum, used_ebs);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	ubi_assert(vol->eba_tbl[lnum] < 0);
	vol->eba_tbl[lnum] = pnum;

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}

/**
 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @dtype: data type
 *
 * This function changes the contents of a logical eraseblock atomically. @buf
 * has to contain new logical eraseblock data, and @len - the length of the
 * data, which has to be aligned. This function guarantees that in case of an
 * unclean reboot the old contents is preserved. Returns zero in case of
 * success and a negative error code in case of failure.
 *
 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
 */
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len, int dtype)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (len == 0) {
		/*
		 * Special case when data length is zero. In this case the LEB
		 * has to be unmapped and mapped somewhere else.
		 */
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			return err;
		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
	}

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		err = pnum;
		goto out_leb_unlock;
	}

	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
		vol_id, lnum, vol->eba_tbl[lnum], pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	if (vol->eba_tbl[lnum] >= 0) {
		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
		if (err)
			goto out_leb_unlock;
	}

	vol->eba_tbl[lnum] = pnum;

out_leb_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
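
/*
 * Why this is atomic: the new data, together with a VID header carrying a
 * fresh (larger) sequence number, @copy_flag and a data CRC, is written to a
 * different PEB first. Only after that write succeeds is the old PEB put for
 * erasure and the EBA table updated, so an unclean reboot leaves either the
 * old or the new copy on flash and the scanning code can pick the copy with
 * the higher sequence number.
 */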

/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vid_hdr: VID header of the @from physical eraseblock
 *
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
 * function. Returns:
 *   o %0 in case of success;
 *   o %1 if the operation was canceled and should be tried later (e.g.,
 *     because a bit-flip was detected at the target PEB);
 *   o %2 if the volume is being deleted and this LEB should not be moved.
 */
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
		     struct ubi_vid_hdr *vid_hdr)
{
	int err, vol_id, lnum, data_size, aldata_size, idx;
	struct ubi_volume *vol;
	uint32_t crc;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

	if (vid_hdr->vol_type == UBI_VID_STATIC) {
		data_size = be32_to_cpu(vid_hdr->data_size);
		aldata_size = ALIGN(data_size, ubi->min_io_size);
	} else
		data_size = aldata_size =
			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

	idx = vol_id2idx(ubi, vol_id);
	spin_lock(&ubi->volumes_lock);
	/*
	 * Note, we may race with volume deletion, which means that the volume
	 * this logical eraseblock belongs to might be being deleted. Since the
	 * volume deletion unmaps all the volume's logical eraseblocks, it will
	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
	 */
	vol = ubi->volumes[idx];
	if (!vol) {
		/* No need to do further work, cancel */
		dbg_eba("volume %d is being removed, cancel", vol_id);
		spin_unlock(&ubi->volumes_lock);
		return 2;
	}
	spin_unlock(&ubi->volumes_lock);

	/*
	 * We do not want anybody to write to this logical eraseblock while we
	 * are moving it, so lock it.
	 *
	 * Note, we are using non-waiting locking here, because we cannot sleep
	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
	 * unmapping the LEB which is mapped to the PEB we are going to move
	 * (@from). This task locks the LEB and goes sleep in the
	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
	 * LEB is already locked, we just do not move it and return %1.
	 */
	err = leb_write_trylock(ubi, vol_id, lnum);
	if (err) {
		dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
		return err;
	}

	/*
	 * The LEB might have been put meanwhile, and the task which put it is
	 * probably waiting on @ubi->move_mutex. No need to continue the work,
	 * cancel it.
	 */
	if (vol->eba_tbl[lnum] != from) {
		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
			"PEB %d, cancel", vol_id, lnum, from,
			vol->eba_tbl[lnum]);
		err = 1;
		goto out_unlock_leb;
	}

	/*
	 * OK, now the LEB is locked and we can safely start moving it. Since
	 * this function utilizes the @ubi->peb_buf1 buffer which is shared
	 * with some other functions, lock the buffer by taking the
	 * @ubi->buf_mutex.
	 */
	mutex_lock(&ubi->buf_mutex);
	dbg_eba("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn("error %d while reading data from PEB %d",
			 err, from);
		goto out_unlock_buf;
	}

	/*
	 * Now we have got to calculate how much data we have to copy. In
	 * case of a static volume it is fairly easy - the VID header contains
	 * the data size. In case of a dynamic volume it is more difficult - we
	 * have to read the contents, cut 0xFF bytes from the end and copy only
	 * the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this. It is important not
	 * to include those 0xFFs in the CRC because later they may be filled
	 * by data.
	 */
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			      ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);

	cond_resched();
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
	cond_resched();

	/*
	 * It may turn out to be that the whole @from physical eraseblock
	 * contains only 0xFF bytes. Then we have to only write the VID header
	 * and do not write any data. This also means we should not set
	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
	 */
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));

	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
	if (err)
		goto out_unlock_buf;

	cond_resched();

	/* Read the VID header back and check if it was written correctly */
	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS)
			ubi_warn("cannot read VID header back from PEB %d", to);
		else
			err = 1;
		goto out_unlock_buf;
	}

	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
		if (err)
			goto out_unlock_buf;

		cond_resched();

		/*
		 * We've written the data and are going to read it back to make
		 * sure it was written correctly.
		 */
		err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
		if (err) {
			if (err != UBI_IO_BITFLIPS)
				ubi_warn("cannot read data back from PEB %d",
					 to);
			else
				err = 1;
			goto out_unlock_buf;
		}

		cond_resched();

		if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
			ubi_warn("read data back from PEB %d - it is different",
				 to);
			goto out_unlock_buf;
		}
	}

	ubi_assert(vol->eba_tbl[lnum] == from);
	vol->eba_tbl[lnum] = to;

out_unlock_buf:
	mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}
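
/*
 * The return codes of ubi_eba_copy_leb() are interpreted by the wear-leveling
 * code that drives PEB moves: %1 means "leave the PEB alone for now and try
 * again later" (contention or a bit-flip at the target), while %2 means the
 * volume is going away and the LEB must not be moved at all.
 */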

/**
 * ubi_eba_init_scan - initialize the EBA unit using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int i, j, err, num_volumes;
	struct ubi_scan_volume *sv;
	struct ubi_volume *vol;
	struct ubi_scan_leb *seb;
	struct rb_node *rb;

	dbg_eba("initialize EBA unit");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	ubi->global_sqnum = si->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		cond_resched();

		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
				       GFP_KERNEL);
		if (!vol->eba_tbl) {
			err = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;

		sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
		if (!sv)
			continue;

		ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
			if (seb->lnum >= vol->reserved_pebs)
				/*
				 * This may happen in case of an unclean reboot
				 * during re-size.
				 */
				ubi_scan_move_to_list(sv, seb, &si->erase);
			vol->eba_tbl[seb->lnum] = seb->pnum;
		}
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err("no enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough free physical eraseblocks */
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			ubi_warn("cannot reserve enough PEBs for bad PEB "
				 "handling, reserved %d, need %d",
				 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	dbg_eba("EBA unit is initialized");
	return 0;

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
	}
	return err;
}
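
/*
 * Reserved-PEB accounting done above: EBA_RESERVED_PEBS (for atomic LEB
 * changes) and, if the flash admits bad blocks, the bad-PEB handling reserve
 * computed by ubi_calculate_reserved() are both moved from @ubi->avail_pebs
 * to @ubi->rsvd_pebs, so they are never handed out for normal LEB mapping.
 */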

/**
 * ubi_eba_close - close EBA unit.
 * @ubi: UBI device description object
 */
void ubi_eba_close(const struct ubi_device *ubi)
{
	int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	dbg_eba("close EBA unit");

	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
	}
}