cdev.c

/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file includes the implementation of UBI character device operations.
 *
 * There are two kinds of character devices in UBI: UBI character devices and
 * UBI volume character devices. UBI character devices allow users to
 * manipulate whole volumes: create, remove, and re-size them. Volume character
 * devices provide volume I/O capabilities.
 *
 * Major and minor numbers are assigned dynamically to both UBI and volume
 * character devices.
 *
 * There is also a third kind of character device - the UBI control character
 * device, which allows UBI devices themselves to be created and deleted. In
 * other words, it is used for attaching and detaching MTD devices.
 */
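
/*
 * Volume I/O goes through the volume character device nodes. Below is a
 * minimal user-space sketch (illustration only, not part of this driver;
 * the "/dev/ubi0_1" node name follows the usual udev naming and is an
 * assumption, error handling is omitted):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	char buf[4096];
 *	int fd = open("/dev/ubi0_1", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	close(fd);
 *
 * Volume management ioctls go to the UBI character device ("/dev/ubi0"), and
 * attach/detach ioctls go to the UBI control device ("/dev/ubi_ctrl");
 * sketches for those follow the corresponding ioctl handlers below.
 */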

#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"

/**
 * get_exclusive - get exclusive access to an UBI volume.
 * @desc: volume descriptor
 *
 * This function changes UBI volume open mode to "exclusive". Returns previous
 * mode value (positive integer) in case of success and a negative error code
 * in case of failure.
 */
static int get_exclusive(struct ubi_volume_desc *desc)
{
	int users, err;
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	users = vol->readers + vol->writers + vol->exclusive;
	ubi_assert(users > 0);
	if (users > 1) {
		ubi_err("%d users for volume %d", users, vol->vol_id);
		err = -EBUSY;
	} else {
		vol->readers = vol->writers = 0;
		vol->exclusive = 1;
		err = desc->mode;
		desc->mode = UBI_EXCLUSIVE;
	}
	spin_unlock(&vol->ubi->volumes_lock);

	return err;
}

/**
 * revoke_exclusive - revoke exclusive mode.
 * @desc: volume descriptor
 * @mode: new mode to switch to
 */
static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
{
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	ubi_assert(vol->readers == 0 && vol->writers == 0);
	ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
	vol->exclusive = 0;
	if (mode == UBI_READONLY)
		vol->readers = 1;
	else if (mode == UBI_READWRITE)
		vol->writers = 1;
	else
		vol->exclusive = 1;
	spin_unlock(&vol->ubi->volumes_lock);

	desc->mode = mode;
}

static int vol_cdev_open(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc;
	int vol_id = iminor(inode) - 1, mode, ubi_num;

	ubi_num = ubi_major2num(imajor(inode));
	if (ubi_num < 0)
		return ubi_num;

	if (file->f_mode & FMODE_WRITE)
		mode = UBI_READWRITE;
	else
		mode = UBI_READONLY;

	dbg_gen("open device %d, volume %d, mode %d",
		ubi_num, vol_id, mode);

	desc = ubi_open_volume(ubi_num, vol_id, mode);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	file->private_data = desc;
	return 0;
}

static int vol_cdev_release(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;

	dbg_gen("release device %d, volume %d, mode %d",
		vol->ubi->ubi_num, vol->vol_id, desc->mode);

	if (vol->updating) {
		ubi_warn("update of volume %d not finished, volume is damaged",
			 vol->vol_id);
		ubi_assert(!vol->changing_leb);
		vol->updating = 0;
		vfree(vol->upd_buf);
	} else if (vol->changing_leb) {
		dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
			vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
			vol->vol_id);
		vol->changing_leb = 0;
		vfree(vol->upd_buf);
	}

	ubi_close_volume(desc);
	return 0;
}

static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;

	if (vol->updating) {
		/* Update is in progress, seeking is prohibited */
		ubi_err("updating");
		return -EBUSY;
	}

	return fixed_size_llseek(file, offset, origin, vol->used_bytes);
}

static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_device *ubi = desc->vol->ubi;
	struct inode *inode = file_inode(file);
	int err;

	mutex_lock(&inode->i_mutex);
	err = ubi_sync(ubi->ubi_num);
	mutex_unlock(&inode->i_mutex);
	return err;
}

static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
			     loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int err, lnum, off, len, tbuf_size;
	size_t count_save = count;
	void *tbuf;

	dbg_gen("read %zd bytes from offset %lld of volume %d",
		count, *offp, vol->vol_id);

	if (vol->updating) {
		ubi_err("updating");
		return -EBUSY;
	}
	if (vol->upd_marker) {
		ubi_err("damaged volume, update marker is set");
		return -EBADF;
	}
	if (*offp == vol->used_bytes || count == 0)
		return 0;

	if (vol->corrupted)
		dbg_gen("read from corrupted volume %d", vol->vol_id);

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;
	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	do {
		cond_resched();

		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;

		err = copy_to_user(buf, tbuf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	} while (count);

	vfree(tbuf);
	return err ? err : count_save - count;
}

/*
 * This function allows direct writes to dynamic UBI volumes, without issuing
 * the volume update operation.
 */
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int lnum, off, len, tbuf_size, err = 0;
	size_t count_save = count;
	char *tbuf;

	if (!vol->direct_writes)
		return -EPERM;

	dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
		count, *offp, vol->vol_id);

	if (vol->vol_type == UBI_STATIC_VOLUME)
		return -EROFS;

	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	if (off & (ubi->min_io_size - 1)) {
		ubi_err("unaligned position");
		return -EINVAL;
	}

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	/* We can write only in fractions of the minimum I/O unit */
	if (count & (ubi->min_io_size - 1)) {
		ubi_err("unaligned write length");
		return -EINVAL;
	}

	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;
	while (count) {
		cond_resched();

		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = copy_from_user(tbuf, buf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;
		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	}

	vfree(tbuf);
	return err ? err : count_save - count;
}
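
/*
 * A minimal user-space sketch of a direct write (illustration only, not part
 * of this driver; the "/dev/ubi0_1" node name and the 2048-byte minimum I/O
 * unit are assumptions, error handling is omitted). The "direct_writes"
 * property must be enabled first, offsets and lengths must be multiples of
 * the minimum I/O unit, and writes cannot go past the volume's used size:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	unsigned char page[2048] = { 0 };
 *	struct ubi_set_vol_prop_req prop = {
 *		.property = UBI_VOL_PROP_DIRECT_WRITE,
 *		.value = 1,
 *	};
 *	int fd = open("/dev/ubi0_1", O_RDWR);
 *	ioctl(fd, UBI_IOCSETVOLPROP, &prop);
 *	pwrite(fd, page, sizeof(page), 0);
 *	close(fd);
 */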

static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *offp)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;

	if (!vol->updating && !vol->changing_leb)
		return vol_cdev_direct_write(file, buf, count, offp);

	if (vol->updating)
		err = ubi_more_update_data(ubi, vol, buf, count);
	else
		err = ubi_more_leb_change_data(ubi, vol, buf, count);

	if (err < 0) {
		ubi_err("cannot accept more %zd bytes of data, error %d",
			count, err);
		return err;
	}

	if (err) {
		/*
		 * The operation is finished, @err contains number of actually
		 * written bytes.
		 */
		count = err;

		if (vol->changing_leb) {
			revoke_exclusive(desc, UBI_READWRITE);
			return count;
		}

		err = ubi_check_volume(ubi, vol->vol_id);
		if (err < 0)
			return err;

		if (err) {
			ubi_warn("volume %d on UBI device %d is corrupted",
				 vol->vol_id, ubi->ubi_num);
			vol->corrupted = 1;
		}
		vol->checked = 1;
		ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
		revoke_exclusive(desc, UBI_READWRITE);
	}

	return count;
}

static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	/* Volume update command */
	case UBI_IOCVOLUP:
	{
		int64_t bytes, rsvd_bytes;

		if (!capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			break;
		}

		err = copy_from_user(&bytes, argp, sizeof(int64_t));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY) {
			err = -EROFS;
			break;
		}

		rsvd_bytes = (long long)vol->reserved_pebs *
			     ubi->leb_size - vol->data_pad;
		if (bytes < 0 || bytes > rsvd_bytes) {
			err = -EINVAL;
			break;
		}

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_update(ubi, vol, bytes);
		if (bytes == 0)
			revoke_exclusive(desc, UBI_READWRITE);
		break;
	}

	/* Atomic logical eraseblock change command */
	case UBI_IOCEBCH:
	{
		struct ubi_leb_change_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_leb_change_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		/* Validate the request */
		err = -EINVAL;
		if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
			break;

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_leb_change(ubi, vol, &req);
		if (req.bytes == 0)
			revoke_exclusive(desc, UBI_READWRITE);
		break;
	}

	/* Logical eraseblock erasure command */
	case UBI_IOCEBER:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		if (lnum < 0 || lnum >= vol->reserved_pebs) {
			err = -EINVAL;
			break;
		}

		dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			break;

		err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
		break;
	}

	/* Logical eraseblock map command */
	case UBI_IOCEBMAP:
	{
		struct ubi_map_req req;

		err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_map(desc, req.lnum);
		break;
	}

	/* Logical eraseblock un-map command */
	case UBI_IOCEBUNMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_unmap(desc, lnum);
		break;
	}

	/* Check if logical eraseblock is mapped command */
	case UBI_IOCEBISMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_is_mapped(desc, lnum);
		break;
	}

	/* Set volume property command */
	case UBI_IOCSETVOLPROP:
	{
		struct ubi_set_vol_prop_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_set_vol_prop_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		switch (req.property) {
		case UBI_VOL_PROP_DIRECT_WRITE:
			mutex_lock(&ubi->device_mutex);
			desc->vol->direct_writes = !!req.value;
			mutex_unlock(&ubi->device_mutex);
			break;
		default:
			err = -EINVAL;
			break;
		}
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}
	return err;
}
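
/*
 * A minimal user-space sketch of the volume update sequence handled by
 * UBI_IOCVOLUP above (illustration only; "/dev/ubi0_1", "image" and
 * "image_size" are placeholders, error handling is omitted). The ioctl
 * announces how many bytes will follow, the image data is then streamed with
 * write(), and passing zero bytes simply wipes the volume:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	int fd = open("/dev/ubi0_1", O_RDWR);
 *	long long bytes = image_size;
 *	ioctl(fd, UBI_IOCVOLUP, &bytes);
 *	write(fd, image, image_size);
 *	close(fd);
 */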

/**
 * verify_mkvol_req - verify volume creation request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL if not.
 */
static int verify_mkvol_req(const struct ubi_device *ubi,
			    const struct ubi_mkvol_req *req)
{
	int n, err = -EINVAL;

	if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
	    req->name_len < 0)
		goto bad;

	if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
	    req->vol_id != UBI_VOL_NUM_AUTO)
		goto bad;

	if (req->alignment == 0)
		goto bad;

	if (req->bytes == 0)
		goto bad;

	if (req->vol_type != UBI_DYNAMIC_VOLUME &&
	    req->vol_type != UBI_STATIC_VOLUME)
		goto bad;

	if (req->alignment > ubi->leb_size)
		goto bad;

	n = req->alignment & (ubi->min_io_size - 1);
	if (req->alignment != 1 && n)
		goto bad;

	if (!req->name[0] || !req->name_len)
		goto bad;

	if (req->name_len > UBI_VOL_NAME_MAX) {
		err = -ENAMETOOLONG;
		goto bad;
	}

	n = strnlen(req->name, req->name_len + 1);
	if (n != req->name_len)
		goto bad;

	return 0;

bad:
	ubi_err("bad volume creation request");
	ubi_dump_mkvol_req(req);
	return err;
}

/**
 * verify_rsvol_req - verify volume re-size request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL if not.
 */
static int verify_rsvol_req(const struct ubi_device *ubi,
			    const struct ubi_rsvol_req *req)
{
	if (req->bytes <= 0)
		return -EINVAL;

	if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
		return -EINVAL;

	return 0;
}

/**
 * rename_volumes - rename UBI volumes.
 * @ubi: UBI device description object
 * @req: volumes re-name request
 *
 * This is a helper function for the volume re-name IOCTL which validates the
 * request, opens the volumes and calls the corresponding volume management
 * function. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int rename_volumes(struct ubi_device *ubi,
			  struct ubi_rnvol_req *req)
{
	int i, n, err;
	struct list_head rename_list;
	struct ubi_rename_entry *re, *re1;

	if (req->count < 0 || req->count > UBI_MAX_RNVOL)
		return -EINVAL;

	if (req->count == 0)
		return 0;

	/* Validate volume IDs and names in the request */
	for (i = 0; i < req->count; i++) {
		if (req->ents[i].vol_id < 0 ||
		    req->ents[i].vol_id >= ubi->vtbl_slots)
			return -EINVAL;
		if (req->ents[i].name_len < 0)
			return -EINVAL;
		if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
			return -ENAMETOOLONG;
		req->ents[i].name[req->ents[i].name_len] = '\0';
		n = strlen(req->ents[i].name);
		if (n != req->ents[i].name_len)
			return -EINVAL;
	}

	/* Make sure volume IDs and names are unique */
	for (i = 0; i < req->count - 1; i++) {
		for (n = i + 1; n < req->count; n++) {
			if (req->ents[i].vol_id == req->ents[n].vol_id) {
				ubi_err("duplicated volume id %d",
					req->ents[i].vol_id);
				return -EINVAL;
			}
			if (!strcmp(req->ents[i].name, req->ents[n].name)) {
				ubi_err("duplicated volume name \"%s\"",
					req->ents[i].name);
				return -EINVAL;
			}
		}
	}

	/* Create the re-name list */
	INIT_LIST_HEAD(&rename_list);
	for (i = 0; i < req->count; i++) {
		int vol_id = req->ents[i].vol_id;
		int name_len = req->ents[i].name_len;
		const char *name = req->ents[i].name;

		re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
		if (!re) {
			err = -ENOMEM;
			goto out_free;
		}

		re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(re->desc)) {
			err = PTR_ERR(re->desc);
			ubi_err("cannot open volume %d, error %d", vol_id, err);
			kfree(re);
			goto out_free;
		}

		/* Skip this re-naming if the name does not really change */
		if (re->desc->vol->name_len == name_len &&
		    !memcmp(re->desc->vol->name, name, name_len)) {
			ubi_close_volume(re->desc);
			kfree(re);
			continue;
		}

		re->new_name_len = name_len;
		memcpy(re->new_name, name, name_len);
		list_add_tail(&re->list, &rename_list);
		dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
			vol_id, re->desc->vol->name, name);
	}

	if (list_empty(&rename_list))
		return 0;

	/* Find out the volumes which have to be removed */
	list_for_each_entry(re, &rename_list, list) {
		struct ubi_volume_desc *desc;
		int no_remove_needed = 0;

		/*
		 * Volume @re->vol_id is going to be re-named to
		 * @re->new_name, while its current name is @name. If a volume
		 * with name @re->new_name currently exists, it has to be
		 * removed, unless it is also re-named in the request (@req).
		 */
		list_for_each_entry(re1, &rename_list, list) {
			if (re->new_name_len == re1->desc->vol->name_len &&
			    !memcmp(re->new_name, re1->desc->vol->name,
				    re1->desc->vol->name_len)) {
				no_remove_needed = 1;
				break;
			}
		}

		if (no_remove_needed)
			continue;

		/*
		 * It seems we need to remove volume with name @re->new_name,
		 * if it exists.
		 */
		desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
					  UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			if (err == -ENODEV)
				/* Re-naming into a non-existing volume name */
				continue;

			/* The volume exists but busy, or an error occurred */
			ubi_err("cannot open volume \"%s\", error %d",
				re->new_name, err);
			goto out_free;
		}

		re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
		if (!re1) {
			err = -ENOMEM;
			ubi_close_volume(desc);
			goto out_free;
		}

		re1->remove = 1;
		re1->desc = desc;
		list_add(&re1->list, &rename_list);
		dbg_gen("will remove volume %d, name \"%s\"",
			re1->desc->vol->vol_id, re1->desc->vol->name);
	}

	mutex_lock(&ubi->device_mutex);
	err = ubi_rename_volumes(ubi, &rename_list);
	mutex_unlock(&ubi->device_mutex);

out_free:
	list_for_each_entry_safe(re, re1, &rename_list, list) {
		ubi_close_volume(re->desc);
		list_del(&re->list);
		kfree(re);
	}
	return err;
}
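
/*
 * A minimal user-space sketch of a volume re-name request as consumed by
 * rename_volumes() via UBI_IOCRNVOL (illustration only; "/dev/ubi0", the
 * volume ID and the new name are assumptions, error handling is omitted):
 *
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	struct ubi_rnvol_req req = { .count = 1 };
 *	int fd = open("/dev/ubi0", O_RDWR);
 *	req.ents[0].vol_id = 1;
 *	req.ents[0].name_len = strlen("new_name");
 *	strcpy(req.ents[0].name, "new_name");
 *	ioctl(fd, UBI_IOCRNVOL, &req);
 *	close(fd);
 */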

static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_device *ubi;
	struct ubi_volume_desc *desc;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	ubi = ubi_get_by_major(imajor(file->f_mapping->host));
	if (!ubi)
		return -ENODEV;

	switch (cmd) {
	/* Create volume command */
	case UBI_IOCMKVOL:
	{
		struct ubi_mkvol_req req;

		dbg_gen("create volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_mkvol_req(ubi, &req);
		if (err)
			break;

		mutex_lock(&ubi->device_mutex);
		err = ubi_create_volume(ubi, &req);
		mutex_unlock(&ubi->device_mutex);
		if (err)
			break;

		err = put_user(req.vol_id, (__user int32_t *)argp);
		if (err)
			err = -EFAULT;

		break;
	}

	/* Remove volume command */
	case UBI_IOCRMVOL:
	{
		int vol_id;

		dbg_gen("remove volume");
		err = get_user(vol_id, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		mutex_lock(&ubi->device_mutex);
		err = ubi_remove_volume(desc, 0);
		mutex_unlock(&ubi->device_mutex);

		/*
		 * The volume is deleted (unless an error occurred), and the
		 * 'struct ubi_volume' object will be freed when
		 * 'ubi_close_volume()' will call 'put_device()'.
		 */
		ubi_close_volume(desc);
		break;
	}

	/* Re-size volume command */
	case UBI_IOCRSVOL:
	{
		int pebs;
		struct ubi_rsvol_req req;

		dbg_gen("re-size volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_rsvol_req(ubi, &req);
		if (err)
			break;

		desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
			       desc->vol->usable_leb_size);

		mutex_lock(&ubi->device_mutex);
		err = ubi_resize_volume(desc, pebs);
		mutex_unlock(&ubi->device_mutex);
		ubi_close_volume(desc);
		break;
	}

	/* Re-name volumes command */
	case UBI_IOCRNVOL:
	{
		struct ubi_rnvol_req *req;

		dbg_gen("re-name volumes");
		req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			break;
		}

		err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
		if (err) {
			err = -EFAULT;
			kfree(req);
			break;
		}

		err = rename_volumes(ubi, req);
		kfree(req);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	ubi_put_device(ubi);
	return err;
}
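
/*
 * A minimal user-space sketch of volume creation via UBI_IOCMKVOL
 * (illustration only; "/dev/ubi0", the size and the volume name are
 * assumptions, error handling is omitted). On success the allocated volume
 * ID is copied back into req.vol_id:
 *
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	struct ubi_mkvol_req req = {
 *		.vol_id = UBI_VOL_NUM_AUTO,
 *		.alignment = 1,
 *		.bytes = 1024 * 1024,
 *		.vol_type = UBI_DYNAMIC_VOLUME,
 *	};
 *	int fd = open("/dev/ubi0", O_RDWR);
 *	req.name_len = strlen("my_volume");
 *	strcpy(req.name, "my_volume");
 *	ioctl(fd, UBI_IOCMKVOL, &req);
 *	close(fd);
 */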

static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	switch (cmd) {
	/* Attach an MTD device command */
	case UBI_IOCATT:
	{
		struct ubi_attach_req req;
		struct mtd_info *mtd;

		dbg_gen("attach MTD device");
		err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (req.mtd_num < 0 ||
		    (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
			err = -EINVAL;
			break;
		}

		mtd = get_mtd_device(NULL, req.mtd_num);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			break;
		}

		/*
		 * Note, further request verification is done by
		 * 'ubi_attach_mtd_dev()'.
		 */
		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
					 req.max_beb_per1024);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0)
			put_mtd_device(mtd);
		else
			/* @err contains UBI device number */
			err = put_user(err, (__user int32_t *)argp);

		break;
	}

	/* Detach an MTD device command */
	case UBI_IOCDET:
	{
		int ubi_num;

		dbg_gen("detach MTD device");
		err = get_user(ubi_num, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_detach_mtd_dev(ubi_num, 0);
		mutex_unlock(&ubi_devices_mutex);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	return err;
}
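
/*
 * A minimal user-space sketch of attaching and then detaching an MTD device
 * through the UBI control device (illustration only; "/dev/ubi_ctrl" and
 * mtd_num 0 are assumptions, error handling is omitted). Zero vid_hdr_offset
 * and max_beb_per1024 select the defaults, and the number of the newly
 * created UBI device is copied back into req.ubi_num:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/ubi-user.h>
 *
 *	struct ubi_attach_req req = {
 *		.ubi_num = UBI_DEV_NUM_AUTO,
 *		.mtd_num = 0,
 *	};
 *	int fd = open("/dev/ubi_ctrl", O_RDWR);
 *	ioctl(fd, UBI_IOCATT, &req);
 *	int ubi_num = req.ubi_num;
 *	ioctl(fd, UBI_IOCDET, &ubi_num);
 *	close(fd);
 */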

#ifdef CONFIG_COMPAT
static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return vol_cdev_ioctl(file, cmd, translated_arg);
}

static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ubi_cdev_ioctl(file, cmd, translated_arg);
}

static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ctrl_cdev_ioctl(file, cmd, translated_arg);
}
#else
#define vol_cdev_compat_ioctl NULL
#define ubi_cdev_compat_ioctl NULL
#define ctrl_cdev_compat_ioctl NULL
#endif

/* UBI volume character device operations */
const struct file_operations ubi_vol_cdev_operations = {
	.owner          = THIS_MODULE,
	.open           = vol_cdev_open,
	.release        = vol_cdev_release,
	.llseek         = vol_cdev_llseek,
	.read           = vol_cdev_read,
	.write          = vol_cdev_write,
	.fsync          = vol_cdev_fsync,
	.unlocked_ioctl = vol_cdev_ioctl,
	.compat_ioctl   = vol_cdev_compat_ioctl,
};

/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.unlocked_ioctl = ubi_cdev_ioctl,
	.compat_ioctl   = ubi_cdev_compat_ioctl,
};

/* UBI control character device operations */
const struct file_operations ubi_ctrl_cdev_operations = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ctrl_cdev_ioctl,
	.compat_ioctl   = ctrl_cdev_compat_ioctl,
	.llseek         = no_llseek,
};