  1. /*
  2. * $Id: mtdchar.c,v 1.67 2005/02/08 17:45:51 nico Exp $
  3. *
  4. * Character-device access to raw MTD devices.
  5. *
  6. */
  7. #include <linux/config.h>
  8. #include <linux/kernel.h>
  9. #include <linux/module.h>
  10. #include <linux/mtd/mtd.h>
  11. #include <linux/mtd/compatmac.h>
  12. #include <linux/slab.h>
  13. #include <linux/init.h>
  14. #include <linux/fs.h>
  15. #include <asm/uaccess.h>
  16. #ifdef CONFIG_DEVFS_FS
  17. #include <linux/devfs_fs_kernel.h>
  18. static void mtd_notify_add(struct mtd_info* mtd)
  19. {
  20. if (!mtd)
  21. return;
  22. devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
  23. S_IFCHR | S_IRUGO | S_IWUGO, "mtd/%d", mtd->index);
  24. devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
  25. S_IFCHR | S_IRUGO, "mtd/%dro", mtd->index);
  26. }
  27. static void mtd_notify_remove(struct mtd_info* mtd)
  28. {
  29. if (!mtd)
  30. return;
  31. devfs_remove("mtd/%d", mtd->index);
  32. devfs_remove("mtd/%dro", mtd->index);
  33. }
  34. static struct mtd_notifier notifier = {
  35. .add = mtd_notify_add,
  36. .remove = mtd_notify_remove,
  37. };
  38. static inline void mtdchar_devfs_init(void)
  39. {
  40. devfs_mk_dir("mtd");
  41. register_mtd_user(&notifier);
  42. }
  43. static inline void mtdchar_devfs_exit(void)
  44. {
  45. unregister_mtd_user(&notifier);
  46. devfs_remove("mtd");
  47. }
  48. #else /* !DEVFS */
  49. #define mtdchar_devfs_init() do { } while(0)
  50. #define mtdchar_devfs_exit() do { } while(0)
  51. #endif
  52. /* Well... let's abuse the unused bits in file->f_mode for those */
  53. #define MTD_MODE_OTP_FACT 0x1000
  54. #define MTD_MODE_OTP_USER 0x2000
  55. #define MTD_MODE_MASK 0xf000
  56. static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
  57. {
  58. struct mtd_info *mtd = file->private_data;
  59. switch (orig) {
  60. case 0:
  61. /* SEEK_SET */
  62. file->f_pos = offset;
  63. break;
  64. case 1:
  65. /* SEEK_CUR */
  66. file->f_pos += offset;
  67. break;
  68. case 2:
  69. /* SEEK_END */
  70. file->f_pos =mtd->size + offset;
  71. break;
  72. default:
  73. return -EINVAL;
  74. }
  75. if (file->f_pos < 0)
  76. file->f_pos = 0;
  77. else if (file->f_pos >= mtd->size)
  78. file->f_pos = mtd->size - 1;
  79. return file->f_pos;
  80. }
  81. static int mtd_open(struct inode *inode, struct file *file)
  82. {
  83. int minor = iminor(inode);
  84. int devnum = minor >> 1;
  85. struct mtd_info *mtd;
  86. DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
  87. if (devnum >= MAX_MTD_DEVICES)
  88. return -ENODEV;
  89. /* You can't open the RO devices RW */
  90. if ((file->f_mode & 2) && (minor & 1))
  91. return -EACCES;
  92. /* make sure the locally abused bits are initialy clear */
  93. if (file->f_mode & MTD_MODE_MASK)
  94. return -EWOULDBLOCK;
  95. mtd = get_mtd_device(NULL, devnum);
  96. if (!mtd)
  97. return -ENODEV;
  98. if (MTD_ABSENT == mtd->type) {
  99. put_mtd_device(mtd);
  100. return -ENODEV;
  101. }
  102. file->private_data = mtd;
  103. /* You can't open it RW if it's not a writeable device */
  104. if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
  105. put_mtd_device(mtd);
  106. return -EACCES;
  107. }
  108. return 0;
  109. } /* mtd_open */
  110. /*====================================================================*/
  111. static int mtd_close(struct inode *inode, struct file *file)
  112. {
  113. struct mtd_info *mtd;
  114. DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
  115. mtd = file->private_data;
  116. if (mtd->sync)
  117. mtd->sync(mtd);
  118. put_mtd_device(mtd);
  119. return 0;
  120. } /* mtd_close */
  121. /* FIXME: This _really_ needs to die. In 2.5, we should lock the
  122. userspace buffer down and use it directly with readv/writev.
  123. */
  124. #define MAX_KMALLOC_SIZE 0x20000
  125. static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
  126. {
  127. struct mtd_info *mtd = file->private_data;
  128. size_t retlen=0;
  129. size_t total_retlen=0;
  130. int ret=0;
  131. int len;
  132. char *kbuf;
  133. DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
  134. if (*ppos + count > mtd->size)
  135. count = mtd->size - *ppos;
  136. if (!count)
  137. return 0;
  138. /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
  139. and pass them directly to the MTD functions */
  140. while (count) {
  141. if (count > MAX_KMALLOC_SIZE)
  142. len = MAX_KMALLOC_SIZE;
  143. else
  144. len = count;
  145. kbuf=kmalloc(len,GFP_KERNEL);
  146. if (!kbuf)
  147. return -ENOMEM;
  148. switch (file->f_mode & MTD_MODE_MASK) {
  149. case MTD_MODE_OTP_FACT:
  150. ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  151. break;
  152. case MTD_MODE_OTP_USER:
  153. ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  154. break;
  155. default:
  156. ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
  157. }
  158. /* Nand returns -EBADMSG on ecc errors, but it returns
  159. * the data. For our userspace tools it is important
  160. * to dump areas with ecc errors !
  161. * Userspace software which accesses NAND this way
  162. * must be aware of the fact that it deals with NAND
  163. */
  164. if (!ret || (ret == -EBADMSG)) {
  165. *ppos += retlen;
  166. if (copy_to_user(buf, kbuf, retlen)) {
  167. kfree(kbuf);
  168. return -EFAULT;
  169. }
  170. else
  171. total_retlen += retlen;
  172. count -= retlen;
  173. buf += retlen;
  174. if (retlen == 0)
  175. count = 0;
  176. }
  177. else {
  178. kfree(kbuf);
  179. return ret;
  180. }
  181. kfree(kbuf);
  182. }
  183. return total_retlen;
  184. } /* mtd_read */
  185. static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
  186. {
  187. struct mtd_info *mtd = file->private_data;
  188. char *kbuf;
  189. size_t retlen;
  190. size_t total_retlen=0;
  191. int ret=0;
  192. int len;
  193. DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
  194. if (*ppos == mtd->size)
  195. return -ENOSPC;
  196. if (*ppos + count > mtd->size)
  197. count = mtd->size - *ppos;
  198. if (!count)
  199. return 0;
  200. while (count) {
  201. if (count > MAX_KMALLOC_SIZE)
  202. len = MAX_KMALLOC_SIZE;
  203. else
  204. len = count;
  205. kbuf=kmalloc(len,GFP_KERNEL);
  206. if (!kbuf) {
  207. printk("kmalloc is null\n");
  208. return -ENOMEM;
  209. }
  210. if (copy_from_user(kbuf, buf, len)) {
  211. kfree(kbuf);
  212. return -EFAULT;
  213. }
  214. switch (file->f_mode & MTD_MODE_MASK) {
  215. case MTD_MODE_OTP_FACT:
  216. ret = -EROFS;
  217. break;
  218. case MTD_MODE_OTP_USER:
  219. if (!mtd->write_user_prot_reg) {
  220. ret = -EOPNOTSUPP;
  221. break;
  222. }
  223. ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  224. break;
  225. default:
  226. ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
  227. }
  228. if (!ret) {
  229. *ppos += retlen;
  230. total_retlen += retlen;
  231. count -= retlen;
  232. buf += retlen;
  233. }
  234. else {
  235. kfree(kbuf);
  236. return ret;
  237. }
  238. kfree(kbuf);
  239. }
  240. return total_retlen;
  241. } /* mtd_write */
  242. /*======================================================================
  243. IOCTL calls for getting device parameters.
  244. ======================================================================*/
  245. static void mtdchar_erase_callback (struct erase_info *instr)
  246. {
  247. wake_up((wait_queue_head_t *)instr->priv);
  248. }
  249. static int mtd_ioctl(struct inode *inode, struct file *file,
  250. u_int cmd, u_long arg)
  251. {
  252. struct mtd_info *mtd = file->private_data;
  253. void __user *argp = (void __user *)arg;
  254. int ret = 0;
  255. u_long size;
  256. DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
  257. size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
  258. if (cmd & IOC_IN) {
  259. if (!access_ok(VERIFY_READ, argp, size))
  260. return -EFAULT;
  261. }
  262. if (cmd & IOC_OUT) {
  263. if (!access_ok(VERIFY_WRITE, argp, size))
  264. return -EFAULT;
  265. }
  266. switch (cmd) {
  267. case MEMGETREGIONCOUNT:
  268. if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
  269. return -EFAULT;
  270. break;
  271. case MEMGETREGIONINFO:
  272. {
  273. struct region_info_user ur;
  274. if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
  275. return -EFAULT;
  276. if (ur.regionindex >= mtd->numeraseregions)
  277. return -EINVAL;
  278. if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
  279. sizeof(struct mtd_erase_region_info)))
  280. return -EFAULT;
  281. break;
  282. }
  283. case MEMGETINFO:
  284. if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user)))
  285. return -EFAULT;
  286. break;
  287. case MEMERASE:
  288. {
  289. struct erase_info *erase;
  290. if(!(file->f_mode & 2))
  291. return -EPERM;
  292. erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
  293. if (!erase)
  294. ret = -ENOMEM;
  295. else {
  296. wait_queue_head_t waitq;
  297. DECLARE_WAITQUEUE(wait, current);
  298. init_waitqueue_head(&waitq);
  299. memset (erase,0,sizeof(struct erase_info));
  300. if (copy_from_user(&erase->addr, argp,
  301. sizeof(struct erase_info_user))) {
  302. kfree(erase);
  303. return -EFAULT;
  304. }
  305. erase->mtd = mtd;
  306. erase->callback = mtdchar_erase_callback;
  307. erase->priv = (unsigned long)&waitq;
  308. /*
  309. FIXME: Allow INTERRUPTIBLE. Which means
  310. not having the wait_queue head on the stack.
  311. If the wq_head is on the stack, and we
  312. leave because we got interrupted, then the
  313. wq_head is no longer there when the
  314. callback routine tries to wake us up.
  315. */
  316. ret = mtd->erase(mtd, erase);
  317. if (!ret) {
  318. set_current_state(TASK_UNINTERRUPTIBLE);
  319. add_wait_queue(&waitq, &wait);
  320. if (erase->state != MTD_ERASE_DONE &&
  321. erase->state != MTD_ERASE_FAILED)
  322. schedule();
  323. remove_wait_queue(&waitq, &wait);
  324. set_current_state(TASK_RUNNING);
  325. ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
  326. }
  327. kfree(erase);
  328. }
  329. break;
  330. }
  331. case MEMWRITEOOB:
  332. {
  333. struct mtd_oob_buf buf;
  334. void *databuf;
  335. ssize_t retlen;
  336. if(!(file->f_mode & 2))
  337. return -EPERM;
  338. if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
  339. return -EFAULT;
  340. if (buf.length > 0x4096)
  341. return -EINVAL;
  342. if (!mtd->write_oob)
  343. ret = -EOPNOTSUPP;
  344. else
  345. ret = access_ok(VERIFY_READ, buf.ptr,
  346. buf.length) ? 0 : EFAULT;
  347. if (ret)
  348. return ret;
  349. databuf = kmalloc(buf.length, GFP_KERNEL);
  350. if (!databuf)
  351. return -ENOMEM;
  352. if (copy_from_user(databuf, buf.ptr, buf.length)) {
  353. kfree(databuf);
  354. return -EFAULT;
  355. }
  356. ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);
  357. if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t)))
  358. ret = -EFAULT;
  359. kfree(databuf);
  360. break;
  361. }
  362. case MEMREADOOB:
  363. {
  364. struct mtd_oob_buf buf;
  365. void *databuf;
  366. ssize_t retlen;
  367. if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
  368. return -EFAULT;
  369. if (buf.length > 0x4096)
  370. return -EINVAL;
  371. if (!mtd->read_oob)
  372. ret = -EOPNOTSUPP;
  373. else
  374. ret = access_ok(VERIFY_WRITE, buf.ptr,
  375. buf.length) ? 0 : -EFAULT;
  376. if (ret)
  377. return ret;
  378. databuf = kmalloc(buf.length, GFP_KERNEL);
  379. if (!databuf)
  380. return -ENOMEM;
  381. ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);
  382. if (put_user(retlen, (uint32_t __user *)argp))
  383. ret = -EFAULT;
  384. else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
  385. ret = -EFAULT;
  386. kfree(databuf);
  387. break;
  388. }
  389. case MEMLOCK:
  390. {
  391. struct erase_info_user info;
  392. if (copy_from_user(&info, argp, sizeof(info)))
  393. return -EFAULT;
  394. if (!mtd->lock)
  395. ret = -EOPNOTSUPP;
  396. else
  397. ret = mtd->lock(mtd, info.start, info.length);
  398. break;
  399. }
  400. case MEMUNLOCK:
  401. {
  402. struct erase_info_user info;
  403. if (copy_from_user(&info, argp, sizeof(info)))
  404. return -EFAULT;
  405. if (!mtd->unlock)
  406. ret = -EOPNOTSUPP;
  407. else
  408. ret = mtd->unlock(mtd, info.start, info.length);
  409. break;
  410. }
  411. case MEMSETOOBSEL:
  412. {
  413. if (copy_from_user(&mtd->oobinfo, argp, sizeof(struct nand_oobinfo)))
  414. return -EFAULT;
  415. break;
  416. }
  417. case MEMGETOOBSEL:
  418. {
  419. if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo)))
  420. return -EFAULT;
  421. break;
  422. }
  423. case MEMGETBADBLOCK:
  424. {
  425. loff_t offs;
  426. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  427. return -EFAULT;
  428. if (!mtd->block_isbad)
  429. ret = -EOPNOTSUPP;
  430. else
  431. return mtd->block_isbad(mtd, offs);
  432. break;
  433. }
  434. case MEMSETBADBLOCK:
  435. {
  436. loff_t offs;
  437. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  438. return -EFAULT;
  439. if (!mtd->block_markbad)
  440. ret = -EOPNOTSUPP;
  441. else
  442. return mtd->block_markbad(mtd, offs);
  443. break;
  444. }
  445. #ifdef CONFIG_MTD_OTP
  446. case OTPSELECT:
  447. {
  448. int mode;
  449. if (copy_from_user(&mode, argp, sizeof(int)))
  450. return -EFAULT;
  451. file->f_mode &= ~MTD_MODE_MASK;
  452. switch (mode) {
  453. case MTD_OTP_FACTORY:
  454. if (!mtd->read_fact_prot_reg)
  455. ret = -EOPNOTSUPP;
  456. else
  457. file->f_mode |= MTD_MODE_OTP_FACT;
  458. break;
  459. case MTD_OTP_USER:
  460. if (!mtd->read_fact_prot_reg)
  461. ret = -EOPNOTSUPP;
  462. else
  463. file->f_mode |= MTD_MODE_OTP_USER;
  464. break;
  465. default:
  466. ret = -EINVAL;
  467. case MTD_OTP_OFF:
  468. break;
  469. }
  470. break;
  471. }
  472. case OTPGETREGIONCOUNT:
  473. case OTPGETREGIONINFO:
  474. {
  475. struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
  476. if (!buf)
  477. return -ENOMEM;
  478. ret = -EOPNOTSUPP;
  479. switch (file->f_mode & MTD_MODE_MASK) {
  480. case MTD_MODE_OTP_FACT:
  481. if (mtd->get_fact_prot_info)
  482. ret = mtd->get_fact_prot_info(mtd, buf, 4096);
  483. break;
  484. case MTD_MODE_OTP_USER:
  485. if (mtd->get_user_prot_info)
  486. ret = mtd->get_user_prot_info(mtd, buf, 4096);
  487. break;
  488. }
  489. if (ret >= 0) {
  490. if (cmd == OTPGETREGIONCOUNT) {
  491. int nbr = ret / sizeof(struct otp_info);
  492. ret = copy_to_user(argp, &nbr, sizeof(int));
  493. } else
  494. ret = copy_to_user(argp, buf, ret);
  495. if (ret)
  496. ret = -EFAULT;
  497. }
  498. kfree(buf);
  499. break;
  500. }
  501. case OTPLOCK:
  502. {
  503. struct otp_info info;
  504. if ((file->f_mode & MTD_MODE_MASK) != MTD_MODE_OTP_USER)
  505. return -EINVAL;
  506. if (copy_from_user(&info, argp, sizeof(info)))
  507. return -EFAULT;
  508. if (!mtd->lock_user_prot_reg)
  509. return -EOPNOTSUPP;
  510. ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
  511. break;
  512. }
  513. #endif
  514. default:
  515. ret = -ENOTTY;
  516. }
  517. return ret;
  518. } /* memory_ioctl */
  519. static struct file_operations mtd_fops = {
  520. .owner = THIS_MODULE,
  521. .llseek = mtd_lseek,
  522. .read = mtd_read,
  523. .write = mtd_write,
  524. .ioctl = mtd_ioctl,
  525. .open = mtd_open,
  526. .release = mtd_close,
  527. };
  528. static int __init init_mtdchar(void)
  529. {
  530. if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
  531. printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
  532. MTD_CHAR_MAJOR);
  533. return -EAGAIN;
  534. }
  535. mtdchar_devfs_init();
  536. return 0;
  537. }
  538. static void __exit cleanup_mtdchar(void)
  539. {
  540. mtdchar_devfs_exit();
  541. unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
  542. }
  543. module_init(init_mtdchar);
  544. module_exit(cleanup_mtdchar);
  545. MODULE_LICENSE("GPL");
  546. MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
  547. MODULE_DESCRIPTION("Direct character-device access to MTD devices");