mtdchar.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980
  1. /*
  2. * Character-device access to raw MTD devices.
  3. *
  4. */
  5. #include <linux/device.h>
  6. #include <linux/fs.h>
  7. #include <linux/mm.h>
  8. #include <linux/err.h>
  9. #include <linux/init.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/sched.h>
  14. #include <linux/smp_lock.h>
  15. #include <linux/backing-dev.h>
  16. #include <linux/compat.h>
  17. #include <linux/mtd/mtd.h>
  18. #include <linux/mtd/compatmac.h>
  19. #include <asm/uaccess.h>
  20. /*
  21. * Data structure to hold the pointer to the mtd device as well
* as mode information for various use cases.
  23. */
/* Per-open-file state, hung off file->private_data in mtd_open(). */
struct mtd_file_info {
	struct mtd_info *mtd;		/* device reference taken in mtd_open() */
	enum mtd_file_modes mode;	/* MTD_MODE_* selected via ioctl */
};
  28. static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
  29. {
  30. struct mtd_file_info *mfi = file->private_data;
  31. struct mtd_info *mtd = mfi->mtd;
  32. switch (orig) {
  33. case SEEK_SET:
  34. break;
  35. case SEEK_CUR:
  36. offset += file->f_pos;
  37. break;
  38. case SEEK_END:
  39. offset += mtd->size;
  40. break;
  41. default:
  42. return -EINVAL;
  43. }
  44. if (offset >= 0 && offset <= mtd->size)
  45. return file->f_pos = offset;
  46. return -EINVAL;
  47. }
/*
 * Open a /dev/mtdN node.  The minor number encodes the device index in
 * its upper bits and a read-only flag in bit 0 (odd minors are the RO
 * nodes).  Takes a reference on the MTD device and allocates the
 * per-file mtd_file_info; both are released in mtd_close().
 */
static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;	/* minor = devnum*2 (+1 for RO node) */
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	lock_kernel();		/* BKL still serialises open against ioctl here */
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	/* A placeholder for a removed device: nothing to talk to. */
	if (mtd->type == MTD_ABSENT) {
		put_mtd_device(mtd);
		ret = -ENODEV;
		goto out;
	}

	if (mtd->backing_dev_info)
		file->f_mapping->backing_dev_info = mtd->backing_dev_info;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		ret = -EACCES;
		goto out;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;

out:
	unlock_kernel();
	return ret;
} /* mtd_open */
  90. /*====================================================================*/
  91. static int mtd_close(struct inode *inode, struct file *file)
  92. {
  93. struct mtd_file_info *mfi = file->private_data;
  94. struct mtd_info *mtd = mfi->mtd;
  95. DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
  96. /* Only sync if opened RW */
  97. if ((file->f_mode & FMODE_WRITE) && mtd->sync)
  98. mtd->sync(mtd);
  99. put_mtd_device(mtd);
  100. file->private_data = NULL;
  101. kfree(mfi);
  102. return 0;
  103. } /* mtd_close */
  104. /* FIXME: This _really_ needs to die. In 2.5, we should lock the
  105. userspace buffer down and use it directly with readv/writev.
  106. */
  107. #define MAX_KMALLOC_SIZE 0x20000
/*
 * Read @count bytes at *@ppos into the user buffer @buf.
 *
 * Data is staged through a kernel bounce buffer of at most
 * MAX_KMALLOC_SIZE bytes; larger requests loop in chunks.  The
 * per-file mode (mfi->mode) selects plain, OTP or raw (no-ECC) reads.
 * Returns the number of bytes copied, 0 at end of device, or a
 * negative errno.
 */
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	/* Clamp the request to the device size. */
	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */
	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {
		/* Size of this chunk. */
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			/* Raw mode: page data without ECC correction. */
			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;
			/* NOTE(review): ops.ooblen/ooboffs are left
			 * uninitialized; presumably read_oob only looks at
			 * len/datbuf when oobbuf is NULL — confirm against
			 * the driver implementations. */

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ecc errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ecc errors !
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occured and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			/* Zero-length read means no progress: stop looping. */
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */
/*
 * Write @count bytes from the user buffer @buf at *@ppos.
 *
 * Mirrors mtd_read(): data is staged through a bounce buffer in
 * MAX_KMALLOC_SIZE chunks, and the per-file mode selects normal,
 * user-OTP or raw writes (factory OTP is read-only -> -EROFS).
 * Returns bytes written, -ENOSPC at end of device, or a negative errno.
 */
static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

	/* Writing at the very end of the device: no space at all. */
	if (*ppos == mtd->size)
		return -ENOSPC;

	/* Clamp the request to the device size. */
	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {
		/* Size of this chunk. */
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			/* Factory OTP area is permanently read-only. */
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			/* Raw mode: write page data without ECC generation. */
			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */
  256. /*======================================================================
  257. IOCTL calls for getting device parameters.
  258. ======================================================================*/
/*
 * Erase-completion callback: wake the MEMERASE caller sleeping on the
 * wait queue whose address was stashed in instr->priv.
 */
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}
  263. #ifdef CONFIG_HAVE_MTD_OTP
  264. static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
  265. {
  266. struct mtd_info *mtd = mfi->mtd;
  267. int ret = 0;
  268. switch (mode) {
  269. case MTD_OTP_FACTORY:
  270. if (!mtd->read_fact_prot_reg)
  271. ret = -EOPNOTSUPP;
  272. else
  273. mfi->mode = MTD_MODE_OTP_FACTORY;
  274. break;
  275. case MTD_OTP_USER:
  276. if (!mtd->read_fact_prot_reg)
  277. ret = -EOPNOTSUPP;
  278. else
  279. mfi->mode = MTD_MODE_OTP_USER;
  280. break;
  281. default:
  282. ret = -EINVAL;
  283. case MTD_OTP_OFF:
  284. break;
  285. }
  286. return ret;
  287. }
  288. #else
  289. # define otp_select_filemode(f,m) -EOPNOTSUPP
  290. #endif
  291. static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
  292. uint64_t start, uint32_t length, void __user *ptr,
  293. uint32_t __user *retp)
  294. {
  295. struct mtd_oob_ops ops;
  296. uint32_t retlen;
  297. int ret = 0;
  298. if (!(file->f_mode & FMODE_WRITE))
  299. return -EPERM;
  300. if (length > 4096)
  301. return -EINVAL;
  302. if (!mtd->write_oob)
  303. ret = -EOPNOTSUPP;
  304. else
  305. ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
  306. if (ret)
  307. return ret;
  308. ops.ooblen = length;
  309. ops.ooboffs = start & (mtd->oobsize - 1);
  310. ops.datbuf = NULL;
  311. ops.mode = MTD_OOB_PLACE;
  312. if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
  313. return -EINVAL;
  314. ops.oobbuf = kmalloc(length, GFP_KERNEL);
  315. if (!ops.oobbuf)
  316. return -ENOMEM;
  317. if (copy_from_user(ops.oobbuf, ptr, length)) {
  318. kfree(ops.oobbuf);
  319. return -EFAULT;
  320. }
  321. start &= ~((uint64_t)mtd->oobsize - 1);
  322. ret = mtd->write_oob(mtd, start, &ops);
  323. if (ops.oobretlen > 0xFFFFFFFFU)
  324. ret = -EOVERFLOW;
  325. retlen = ops.oobretlen;
  326. if (copy_to_user(retp, &retlen, sizeof(length)))
  327. ret = -EFAULT;
  328. kfree(ops.oobbuf);
  329. return ret;
  330. }
  331. static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
  332. uint32_t length, void __user *ptr, uint32_t __user *retp)
  333. {
  334. struct mtd_oob_ops ops;
  335. int ret = 0;
  336. if (length > 4096)
  337. return -EINVAL;
  338. if (!mtd->read_oob)
  339. ret = -EOPNOTSUPP;
  340. else
  341. ret = access_ok(VERIFY_WRITE, ptr,
  342. length) ? 0 : -EFAULT;
  343. if (ret)
  344. return ret;
  345. ops.ooblen = length;
  346. ops.ooboffs = start & (mtd->oobsize - 1);
  347. ops.datbuf = NULL;
  348. ops.mode = MTD_OOB_PLACE;
  349. if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
  350. return -EINVAL;
  351. ops.oobbuf = kmalloc(length, GFP_KERNEL);
  352. if (!ops.oobbuf)
  353. return -ENOMEM;
  354. start &= ~((uint64_t)mtd->oobsize - 1);
  355. ret = mtd->read_oob(mtd, start, &ops);
  356. if (put_user(ops.oobretlen, retp))
  357. ret = -EFAULT;
  358. else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
  359. ops.oobretlen))
  360. ret = -EFAULT;
  361. kfree(ops.oobbuf);
  362. return ret;
  363. }
  364. static int mtd_ioctl(struct inode *inode, struct file *file,
  365. u_int cmd, u_long arg)
  366. {
  367. struct mtd_file_info *mfi = file->private_data;
  368. struct mtd_info *mtd = mfi->mtd;
  369. void __user *argp = (void __user *)arg;
  370. int ret = 0;
  371. u_long size;
  372. struct mtd_info_user info;
  373. DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
  374. size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
  375. if (cmd & IOC_IN) {
  376. if (!access_ok(VERIFY_READ, argp, size))
  377. return -EFAULT;
  378. }
  379. if (cmd & IOC_OUT) {
  380. if (!access_ok(VERIFY_WRITE, argp, size))
  381. return -EFAULT;
  382. }
  383. switch (cmd) {
  384. case MEMGETREGIONCOUNT:
  385. if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
  386. return -EFAULT;
  387. break;
  388. case MEMGETREGIONINFO:
  389. {
  390. uint32_t ur_idx;
  391. struct mtd_erase_region_info *kr;
  392. struct region_info_user __user *ur = argp;
  393. if (get_user(ur_idx, &(ur->regionindex)))
  394. return -EFAULT;
  395. kr = &(mtd->eraseregions[ur_idx]);
  396. if (put_user(kr->offset, &(ur->offset))
  397. || put_user(kr->erasesize, &(ur->erasesize))
  398. || put_user(kr->numblocks, &(ur->numblocks)))
  399. return -EFAULT;
  400. break;
  401. }
  402. case MEMGETINFO:
  403. info.type = mtd->type;
  404. info.flags = mtd->flags;
  405. info.size = mtd->size;
  406. info.erasesize = mtd->erasesize;
  407. info.writesize = mtd->writesize;
  408. info.oobsize = mtd->oobsize;
  409. /* The below fields are obsolete */
  410. info.ecctype = -1;
  411. info.eccsize = 0;
  412. if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
  413. return -EFAULT;
  414. break;
  415. case MEMERASE:
  416. case MEMERASE64:
  417. {
  418. struct erase_info *erase;
  419. if(!(file->f_mode & FMODE_WRITE))
  420. return -EPERM;
  421. erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
  422. if (!erase)
  423. ret = -ENOMEM;
  424. else {
  425. wait_queue_head_t waitq;
  426. DECLARE_WAITQUEUE(wait, current);
  427. init_waitqueue_head(&waitq);
  428. if (cmd == MEMERASE64) {
  429. struct erase_info_user64 einfo64;
  430. if (copy_from_user(&einfo64, argp,
  431. sizeof(struct erase_info_user64))) {
  432. kfree(erase);
  433. return -EFAULT;
  434. }
  435. erase->addr = einfo64.start;
  436. erase->len = einfo64.length;
  437. } else {
  438. struct erase_info_user einfo32;
  439. if (copy_from_user(&einfo32, argp,
  440. sizeof(struct erase_info_user))) {
  441. kfree(erase);
  442. return -EFAULT;
  443. }
  444. erase->addr = einfo32.start;
  445. erase->len = einfo32.length;
  446. }
  447. erase->mtd = mtd;
  448. erase->callback = mtdchar_erase_callback;
  449. erase->priv = (unsigned long)&waitq;
  450. /*
  451. FIXME: Allow INTERRUPTIBLE. Which means
  452. not having the wait_queue head on the stack.
  453. If the wq_head is on the stack, and we
  454. leave because we got interrupted, then the
  455. wq_head is no longer there when the
  456. callback routine tries to wake us up.
  457. */
  458. ret = mtd->erase(mtd, erase);
  459. if (!ret) {
  460. set_current_state(TASK_UNINTERRUPTIBLE);
  461. add_wait_queue(&waitq, &wait);
  462. if (erase->state != MTD_ERASE_DONE &&
  463. erase->state != MTD_ERASE_FAILED)
  464. schedule();
  465. remove_wait_queue(&waitq, &wait);
  466. set_current_state(TASK_RUNNING);
  467. ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
  468. }
  469. kfree(erase);
  470. }
  471. break;
  472. }
  473. case MEMWRITEOOB:
  474. {
  475. struct mtd_oob_buf buf;
  476. struct mtd_oob_buf __user *buf_user = argp;
  477. /* NOTE: writes return length to buf_user->length */
  478. if (copy_from_user(&buf, argp, sizeof(buf)))
  479. ret = -EFAULT;
  480. else
  481. ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
  482. buf.ptr, &buf_user->length);
  483. break;
  484. }
  485. case MEMREADOOB:
  486. {
  487. struct mtd_oob_buf buf;
  488. struct mtd_oob_buf __user *buf_user = argp;
  489. /* NOTE: writes return length to buf_user->start */
  490. if (copy_from_user(&buf, argp, sizeof(buf)))
  491. ret = -EFAULT;
  492. else
  493. ret = mtd_do_readoob(mtd, buf.start, buf.length,
  494. buf.ptr, &buf_user->start);
  495. break;
  496. }
  497. case MEMWRITEOOB64:
  498. {
  499. struct mtd_oob_buf64 buf;
  500. struct mtd_oob_buf64 __user *buf_user = argp;
  501. if (copy_from_user(&buf, argp, sizeof(buf)))
  502. ret = -EFAULT;
  503. else
  504. ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
  505. (void __user *)(uintptr_t)buf.usr_ptr,
  506. &buf_user->length);
  507. break;
  508. }
  509. case MEMREADOOB64:
  510. {
  511. struct mtd_oob_buf64 buf;
  512. struct mtd_oob_buf64 __user *buf_user = argp;
  513. if (copy_from_user(&buf, argp, sizeof(buf)))
  514. ret = -EFAULT;
  515. else
  516. ret = mtd_do_readoob(mtd, buf.start, buf.length,
  517. (void __user *)(uintptr_t)buf.usr_ptr,
  518. &buf_user->length);
  519. break;
  520. }
  521. case MEMLOCK:
  522. {
  523. struct erase_info_user einfo;
  524. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  525. return -EFAULT;
  526. if (!mtd->lock)
  527. ret = -EOPNOTSUPP;
  528. else
  529. ret = mtd->lock(mtd, einfo.start, einfo.length);
  530. break;
  531. }
  532. case MEMUNLOCK:
  533. {
  534. struct erase_info_user einfo;
  535. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  536. return -EFAULT;
  537. if (!mtd->unlock)
  538. ret = -EOPNOTSUPP;
  539. else
  540. ret = mtd->unlock(mtd, einfo.start, einfo.length);
  541. break;
  542. }
  543. /* Legacy interface */
  544. case MEMGETOOBSEL:
  545. {
  546. struct nand_oobinfo oi;
  547. if (!mtd->ecclayout)
  548. return -EOPNOTSUPP;
  549. if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
  550. return -EINVAL;
  551. oi.useecc = MTD_NANDECC_AUTOPLACE;
  552. memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
  553. memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
  554. sizeof(oi.oobfree));
  555. oi.eccbytes = mtd->ecclayout->eccbytes;
  556. if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
  557. return -EFAULT;
  558. break;
  559. }
  560. case MEMGETBADBLOCK:
  561. {
  562. loff_t offs;
  563. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  564. return -EFAULT;
  565. if (!mtd->block_isbad)
  566. ret = -EOPNOTSUPP;
  567. else
  568. return mtd->block_isbad(mtd, offs);
  569. break;
  570. }
  571. case MEMSETBADBLOCK:
  572. {
  573. loff_t offs;
  574. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  575. return -EFAULT;
  576. if (!mtd->block_markbad)
  577. ret = -EOPNOTSUPP;
  578. else
  579. return mtd->block_markbad(mtd, offs);
  580. break;
  581. }
  582. #ifdef CONFIG_HAVE_MTD_OTP
  583. case OTPSELECT:
  584. {
  585. int mode;
  586. if (copy_from_user(&mode, argp, sizeof(int)))
  587. return -EFAULT;
  588. mfi->mode = MTD_MODE_NORMAL;
  589. ret = otp_select_filemode(mfi, mode);
  590. file->f_pos = 0;
  591. break;
  592. }
  593. case OTPGETREGIONCOUNT:
  594. case OTPGETREGIONINFO:
  595. {
  596. struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
  597. if (!buf)
  598. return -ENOMEM;
  599. ret = -EOPNOTSUPP;
  600. switch (mfi->mode) {
  601. case MTD_MODE_OTP_FACTORY:
  602. if (mtd->get_fact_prot_info)
  603. ret = mtd->get_fact_prot_info(mtd, buf, 4096);
  604. break;
  605. case MTD_MODE_OTP_USER:
  606. if (mtd->get_user_prot_info)
  607. ret = mtd->get_user_prot_info(mtd, buf, 4096);
  608. break;
  609. default:
  610. break;
  611. }
  612. if (ret >= 0) {
  613. if (cmd == OTPGETREGIONCOUNT) {
  614. int nbr = ret / sizeof(struct otp_info);
  615. ret = copy_to_user(argp, &nbr, sizeof(int));
  616. } else
  617. ret = copy_to_user(argp, buf, ret);
  618. if (ret)
  619. ret = -EFAULT;
  620. }
  621. kfree(buf);
  622. break;
  623. }
  624. case OTPLOCK:
  625. {
  626. struct otp_info oinfo;
  627. if (mfi->mode != MTD_MODE_OTP_USER)
  628. return -EINVAL;
  629. if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
  630. return -EFAULT;
  631. if (!mtd->lock_user_prot_reg)
  632. return -EOPNOTSUPP;
  633. ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
  634. break;
  635. }
  636. #endif
  637. case ECCGETLAYOUT:
  638. {
  639. if (!mtd->ecclayout)
  640. return -EOPNOTSUPP;
  641. if (copy_to_user(argp, mtd->ecclayout,
  642. sizeof(struct nand_ecclayout)))
  643. return -EFAULT;
  644. break;
  645. }
  646. case ECCGETSTATS:
  647. {
  648. if (copy_to_user(argp, &mtd->ecc_stats,
  649. sizeof(struct mtd_ecc_stats)))
  650. return -EFAULT;
  651. break;
  652. }
  653. case MTDFILEMODE:
  654. {
  655. mfi->mode = 0;
  656. switch(arg) {
  657. case MTD_MODE_OTP_FACTORY:
  658. case MTD_MODE_OTP_USER:
  659. ret = otp_select_filemode(mfi, arg);
  660. break;
  661. case MTD_MODE_RAW:
  662. if (!mtd->read_oob || !mtd->write_oob)
  663. return -EOPNOTSUPP;
  664. mfi->mode = arg;
  665. case MTD_MODE_NORMAL:
  666. break;
  667. default:
  668. ret = -EINVAL;
  669. }
  670. file->f_pos = 0;
  671. break;
  672. }
  673. default:
  674. ret = -ENOTTY;
  675. }
  676. return ret;
  677. } /* memory_ioctl */
  678. #ifdef CONFIG_COMPAT
/*
 * 32-bit layout of struct mtd_oob_buf for the compat ioctl path: the
 * user pointer is a compat_caddr_t so structs from 32-bit userspace
 * unpack correctly on a 64-bit kernel.
 */
struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

/* Compat-width counterparts of MEMWRITEOOB/MEMREADOOB. */
#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
/*
 * compat_ioctl entry point: translate the 32-bit OOB buffer layouts,
 * then hand everything else to the native mtd_ioctl() (argp has
 * already been through compat_ptr(), so the pointer width is fixed up).
 */
static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	lock_kernel();		/* mtd_ioctl() relies on BKL serialisation */

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	default:
		/* All other commands share the native layout. */
		ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
	}

	unlock_kernel();

	return ret;
}
  727. #endif /* CONFIG_COMPAT */
  728. /*
  729. * try to determine where a shared mapping can be made
  730. * - only supported for NOMMU at the moment (MMU can't doesn't copy private
  731. * mappings)
  732. */
  733. #ifndef CONFIG_MMU
/*
 * NOMMU helper: ask the driver where a direct mapping of the device
 * can be placed.  Only fixed offset-into-device mappings are allowed
 * (addr must be 0, and [offset, offset+len) must lie inside the
 * device); returns a driver-chosen address or a negative errno.
 */
static unsigned long mtd_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	if (mtd->get_unmapped_area) {
		unsigned long offset;

		/* Caller may not pick the address; the driver does. */
		if (addr != 0)
			return (unsigned long) -EINVAL;

		if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
			return (unsigned long) -EINVAL;

		offset = pgoff << PAGE_SHIFT;
		if (offset > mtd->size - len)
			return (unsigned long) -EINVAL;

		return mtd->get_unmapped_area(mtd, len, offset, flags);
	}

	/* can't map directly */
	return (unsigned long) -ENOSYS;
}
  756. #endif
  757. /*
  758. * set up a mapping for shared memory segments
  759. */
  760. static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
  761. {
  762. #ifdef CONFIG_MMU
  763. struct mtd_file_info *mfi = file->private_data;
  764. struct mtd_info *mtd = mfi->mtd;
  765. if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
  766. return 0;
  767. return -ENOSYS;
  768. #else
  769. return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
  770. #endif
  771. }
/* File operations for the /dev/mtdN character nodes. */
static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtd_compat_ioctl,
#endif
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	/* NOMMU needs the driver to place direct mappings itself. */
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};
  788. static int __init init_mtdchar(void)
  789. {
  790. int status;
  791. status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops);
  792. if (status < 0) {
  793. printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
  794. MTD_CHAR_MAJOR);
  795. }
  796. return status;
  797. }
/* Module exit: release the character major claimed in init_mtdchar(). */
static void __exit cleanup_mtdchar(void)
{
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}
  802. module_init(init_mtdchar);
  803. module_exit(cleanup_mtdchar);
  804. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
  805. MODULE_LICENSE("GPL");
  806. MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
  807. MODULE_DESCRIPTION("Direct character-device access to MTD devices");
  808. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);