mtdchar.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854
  1. /*
  2. * Character-device access to raw MTD devices.
  3. *
  4. */
  5. #include <linux/device.h>
  6. #include <linux/fs.h>
  7. #include <linux/mm.h>
  8. #include <linux/err.h>
  9. #include <linux/init.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/sched.h>
  14. #include <linux/smp_lock.h>
  15. #include <linux/backing-dev.h>
  16. #include <linux/mtd/mtd.h>
  17. #include <linux/mtd/compatmac.h>
  18. #include <asm/uaccess.h>
/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;		/* underlying MTD device this fd refers to */
	enum mtd_file_modes mode;	/* per-open access mode: normal, OTP, or raw */
};
  27. static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
  28. {
  29. struct mtd_file_info *mfi = file->private_data;
  30. struct mtd_info *mtd = mfi->mtd;
  31. switch (orig) {
  32. case SEEK_SET:
  33. break;
  34. case SEEK_CUR:
  35. offset += file->f_pos;
  36. break;
  37. case SEEK_END:
  38. offset += mtd->size;
  39. break;
  40. default:
  41. return -EINVAL;
  42. }
  43. if (offset >= 0 && offset <= mtd->size)
  44. return file->f_pos = offset;
  45. return -EINVAL;
  46. }
  47. static int mtd_open(struct inode *inode, struct file *file)
  48. {
  49. int minor = iminor(inode);
  50. int devnum = minor >> 1;
  51. int ret = 0;
  52. struct mtd_info *mtd;
  53. struct mtd_file_info *mfi;
  54. DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
  55. if (devnum >= MAX_MTD_DEVICES)
  56. return -ENODEV;
  57. /* You can't open the RO devices RW */
  58. if ((file->f_mode & FMODE_WRITE) && (minor & 1))
  59. return -EACCES;
  60. lock_kernel();
  61. mtd = get_mtd_device(NULL, devnum);
  62. if (IS_ERR(mtd)) {
  63. ret = PTR_ERR(mtd);
  64. goto out;
  65. }
  66. if (mtd->type == MTD_ABSENT) {
  67. put_mtd_device(mtd);
  68. ret = -ENODEV;
  69. goto out;
  70. }
  71. if (mtd->backing_dev_info)
  72. file->f_mapping->backing_dev_info = mtd->backing_dev_info;
  73. /* You can't open it RW if it's not a writeable device */
  74. if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
  75. put_mtd_device(mtd);
  76. ret = -EACCES;
  77. goto out;
  78. }
  79. mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
  80. if (!mfi) {
  81. put_mtd_device(mtd);
  82. ret = -ENOMEM;
  83. goto out;
  84. }
  85. mfi->mtd = mtd;
  86. file->private_data = mfi;
  87. out:
  88. unlock_kernel();
  89. return ret;
  90. } /* mtd_open */
  91. /*====================================================================*/
  92. static int mtd_close(struct inode *inode, struct file *file)
  93. {
  94. struct mtd_file_info *mfi = file->private_data;
  95. struct mtd_info *mtd = mfi->mtd;
  96. DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
  97. /* Only sync if opened RW */
  98. if ((file->f_mode & FMODE_WRITE) && mtd->sync)
  99. mtd->sync(mtd);
  100. put_mtd_device(mtd);
  101. file->private_data = NULL;
  102. kfree(mfi);
  103. return 0;
  104. } /* mtd_close */
  105. /* FIXME: This _really_ needs to die. In 2.5, we should lock the
  106. userspace buffer down and use it directly with readv/writev.
  107. */
  108. #define MAX_KMALLOC_SIZE 0x20000
  109. static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
  110. {
  111. struct mtd_file_info *mfi = file->private_data;
  112. struct mtd_info *mtd = mfi->mtd;
  113. size_t retlen=0;
  114. size_t total_retlen=0;
  115. int ret=0;
  116. int len;
  117. char *kbuf;
  118. DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
  119. if (*ppos + count > mtd->size)
  120. count = mtd->size - *ppos;
  121. if (!count)
  122. return 0;
  123. /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
  124. and pass them directly to the MTD functions */
  125. if (count > MAX_KMALLOC_SIZE)
  126. kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
  127. else
  128. kbuf=kmalloc(count, GFP_KERNEL);
  129. if (!kbuf)
  130. return -ENOMEM;
  131. while (count) {
  132. if (count > MAX_KMALLOC_SIZE)
  133. len = MAX_KMALLOC_SIZE;
  134. else
  135. len = count;
  136. switch (mfi->mode) {
  137. case MTD_MODE_OTP_FACTORY:
  138. ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  139. break;
  140. case MTD_MODE_OTP_USER:
  141. ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  142. break;
  143. case MTD_MODE_RAW:
  144. {
  145. struct mtd_oob_ops ops;
  146. ops.mode = MTD_OOB_RAW;
  147. ops.datbuf = kbuf;
  148. ops.oobbuf = NULL;
  149. ops.len = len;
  150. ret = mtd->read_oob(mtd, *ppos, &ops);
  151. retlen = ops.retlen;
  152. break;
  153. }
  154. default:
  155. ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
  156. }
  157. /* Nand returns -EBADMSG on ecc errors, but it returns
  158. * the data. For our userspace tools it is important
  159. * to dump areas with ecc errors !
  160. * For kernel internal usage it also might return -EUCLEAN
  161. * to signal the caller that a bitflip has occured and has
  162. * been corrected by the ECC algorithm.
  163. * Userspace software which accesses NAND this way
  164. * must be aware of the fact that it deals with NAND
  165. */
  166. if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
  167. *ppos += retlen;
  168. if (copy_to_user(buf, kbuf, retlen)) {
  169. kfree(kbuf);
  170. return -EFAULT;
  171. }
  172. else
  173. total_retlen += retlen;
  174. count -= retlen;
  175. buf += retlen;
  176. if (retlen == 0)
  177. count = 0;
  178. }
  179. else {
  180. kfree(kbuf);
  181. return ret;
  182. }
  183. }
  184. kfree(kbuf);
  185. return total_retlen;
  186. } /* mtd_read */
  187. static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
  188. {
  189. struct mtd_file_info *mfi = file->private_data;
  190. struct mtd_info *mtd = mfi->mtd;
  191. char *kbuf;
  192. size_t retlen;
  193. size_t total_retlen=0;
  194. int ret=0;
  195. int len;
  196. DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
  197. if (*ppos == mtd->size)
  198. return -ENOSPC;
  199. if (*ppos + count > mtd->size)
  200. count = mtd->size - *ppos;
  201. if (!count)
  202. return 0;
  203. if (count > MAX_KMALLOC_SIZE)
  204. kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
  205. else
  206. kbuf=kmalloc(count, GFP_KERNEL);
  207. if (!kbuf)
  208. return -ENOMEM;
  209. while (count) {
  210. if (count > MAX_KMALLOC_SIZE)
  211. len = MAX_KMALLOC_SIZE;
  212. else
  213. len = count;
  214. if (copy_from_user(kbuf, buf, len)) {
  215. kfree(kbuf);
  216. return -EFAULT;
  217. }
  218. switch (mfi->mode) {
  219. case MTD_MODE_OTP_FACTORY:
  220. ret = -EROFS;
  221. break;
  222. case MTD_MODE_OTP_USER:
  223. if (!mtd->write_user_prot_reg) {
  224. ret = -EOPNOTSUPP;
  225. break;
  226. }
  227. ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  228. break;
  229. case MTD_MODE_RAW:
  230. {
  231. struct mtd_oob_ops ops;
  232. ops.mode = MTD_OOB_RAW;
  233. ops.datbuf = kbuf;
  234. ops.oobbuf = NULL;
  235. ops.len = len;
  236. ret = mtd->write_oob(mtd, *ppos, &ops);
  237. retlen = ops.retlen;
  238. break;
  239. }
  240. default:
  241. ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
  242. }
  243. if (!ret) {
  244. *ppos += retlen;
  245. total_retlen += retlen;
  246. count -= retlen;
  247. buf += retlen;
  248. }
  249. else {
  250. kfree(kbuf);
  251. return ret;
  252. }
  253. }
  254. kfree(kbuf);
  255. return total_retlen;
  256. } /* mtd_write */
  257. /*======================================================================
  258. IOCTL calls for getting device parameters.
  259. ======================================================================*/
  260. static void mtdchar_erase_callback (struct erase_info *instr)
  261. {
  262. wake_up((wait_queue_head_t *)instr->priv);
  263. }
  264. #ifdef CONFIG_HAVE_MTD_OTP
  265. static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
  266. {
  267. struct mtd_info *mtd = mfi->mtd;
  268. int ret = 0;
  269. switch (mode) {
  270. case MTD_OTP_FACTORY:
  271. if (!mtd->read_fact_prot_reg)
  272. ret = -EOPNOTSUPP;
  273. else
  274. mfi->mode = MTD_MODE_OTP_FACTORY;
  275. break;
  276. case MTD_OTP_USER:
  277. if (!mtd->read_fact_prot_reg)
  278. ret = -EOPNOTSUPP;
  279. else
  280. mfi->mode = MTD_MODE_OTP_USER;
  281. break;
  282. default:
  283. ret = -EINVAL;
  284. case MTD_OTP_OFF:
  285. break;
  286. }
  287. return ret;
  288. }
  289. #else
  290. # define otp_select_filemode(f,m) -EOPNOTSUPP
  291. #endif
  292. static int mtd_ioctl(struct inode *inode, struct file *file,
  293. u_int cmd, u_long arg)
  294. {
  295. struct mtd_file_info *mfi = file->private_data;
  296. struct mtd_info *mtd = mfi->mtd;
  297. void __user *argp = (void __user *)arg;
  298. int ret = 0;
  299. u_long size;
  300. struct mtd_info_user info;
  301. DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
  302. size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
  303. if (cmd & IOC_IN) {
  304. if (!access_ok(VERIFY_READ, argp, size))
  305. return -EFAULT;
  306. }
  307. if (cmd & IOC_OUT) {
  308. if (!access_ok(VERIFY_WRITE, argp, size))
  309. return -EFAULT;
  310. }
  311. switch (cmd) {
  312. case MEMGETREGIONCOUNT:
  313. if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
  314. return -EFAULT;
  315. break;
  316. case MEMGETREGIONINFO:
  317. {
  318. uint32_t ur_idx;
  319. struct mtd_erase_region_info *kr;
  320. struct region_info_user *ur = (struct region_info_user *) argp;
  321. if (get_user(ur_idx, &(ur->regionindex)))
  322. return -EFAULT;
  323. kr = &(mtd->eraseregions[ur_idx]);
  324. if (put_user(kr->offset, &(ur->offset))
  325. || put_user(kr->erasesize, &(ur->erasesize))
  326. || put_user(kr->numblocks, &(ur->numblocks)))
  327. return -EFAULT;
  328. break;
  329. }
  330. case MEMGETINFO:
  331. info.type = mtd->type;
  332. info.flags = mtd->flags;
  333. info.size = mtd->size;
  334. info.erasesize = mtd->erasesize;
  335. info.writesize = mtd->writesize;
  336. info.oobsize = mtd->oobsize;
  337. /* The below fields are obsolete */
  338. info.ecctype = -1;
  339. info.eccsize = 0;
  340. if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
  341. return -EFAULT;
  342. break;
  343. case MEMERASE:
  344. {
  345. struct erase_info *erase;
  346. if(!(file->f_mode & FMODE_WRITE))
  347. return -EPERM;
  348. erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
  349. if (!erase)
  350. ret = -ENOMEM;
  351. else {
  352. struct erase_info_user einfo;
  353. wait_queue_head_t waitq;
  354. DECLARE_WAITQUEUE(wait, current);
  355. init_waitqueue_head(&waitq);
  356. if (copy_from_user(&einfo, argp,
  357. sizeof(struct erase_info_user))) {
  358. kfree(erase);
  359. return -EFAULT;
  360. }
  361. erase->addr = einfo.start;
  362. erase->len = einfo.length;
  363. erase->mtd = mtd;
  364. erase->callback = mtdchar_erase_callback;
  365. erase->priv = (unsigned long)&waitq;
  366. /*
  367. FIXME: Allow INTERRUPTIBLE. Which means
  368. not having the wait_queue head on the stack.
  369. If the wq_head is on the stack, and we
  370. leave because we got interrupted, then the
  371. wq_head is no longer there when the
  372. callback routine tries to wake us up.
  373. */
  374. ret = mtd->erase(mtd, erase);
  375. if (!ret) {
  376. set_current_state(TASK_UNINTERRUPTIBLE);
  377. add_wait_queue(&waitq, &wait);
  378. if (erase->state != MTD_ERASE_DONE &&
  379. erase->state != MTD_ERASE_FAILED)
  380. schedule();
  381. remove_wait_queue(&waitq, &wait);
  382. set_current_state(TASK_RUNNING);
  383. ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
  384. }
  385. kfree(erase);
  386. }
  387. break;
  388. }
  389. case MEMWRITEOOB:
  390. {
  391. struct mtd_oob_buf buf;
  392. struct mtd_oob_ops ops;
  393. struct mtd_oob_buf __user *user_buf = argp;
  394. uint32_t retlen;
  395. if(!(file->f_mode & FMODE_WRITE))
  396. return -EPERM;
  397. if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
  398. return -EFAULT;
  399. if (buf.length > 4096)
  400. return -EINVAL;
  401. if (!mtd->write_oob)
  402. ret = -EOPNOTSUPP;
  403. else
  404. ret = access_ok(VERIFY_READ, buf.ptr,
  405. buf.length) ? 0 : EFAULT;
  406. if (ret)
  407. return ret;
  408. ops.ooblen = buf.length;
  409. ops.ooboffs = buf.start & (mtd->oobsize - 1);
  410. ops.datbuf = NULL;
  411. ops.mode = MTD_OOB_PLACE;
  412. if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
  413. return -EINVAL;
  414. ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
  415. if (!ops.oobbuf)
  416. return -ENOMEM;
  417. if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
  418. kfree(ops.oobbuf);
  419. return -EFAULT;
  420. }
  421. buf.start &= ~(mtd->oobsize - 1);
  422. ret = mtd->write_oob(mtd, buf.start, &ops);
  423. if (ops.oobretlen > 0xFFFFFFFFU)
  424. ret = -EOVERFLOW;
  425. retlen = ops.oobretlen;
  426. if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
  427. ret = -EFAULT;
  428. kfree(ops.oobbuf);
  429. break;
  430. }
  431. case MEMREADOOB:
  432. {
  433. struct mtd_oob_buf buf;
  434. struct mtd_oob_ops ops;
  435. if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
  436. return -EFAULT;
  437. if (buf.length > 4096)
  438. return -EINVAL;
  439. if (!mtd->read_oob)
  440. ret = -EOPNOTSUPP;
  441. else
  442. ret = access_ok(VERIFY_WRITE, buf.ptr,
  443. buf.length) ? 0 : -EFAULT;
  444. if (ret)
  445. return ret;
  446. ops.ooblen = buf.length;
  447. ops.ooboffs = buf.start & (mtd->oobsize - 1);
  448. ops.datbuf = NULL;
  449. ops.mode = MTD_OOB_PLACE;
  450. if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
  451. return -EINVAL;
  452. ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
  453. if (!ops.oobbuf)
  454. return -ENOMEM;
  455. buf.start &= ~(mtd->oobsize - 1);
  456. ret = mtd->read_oob(mtd, buf.start, &ops);
  457. if (put_user(ops.oobretlen, (uint32_t __user *)argp))
  458. ret = -EFAULT;
  459. else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
  460. ops.oobretlen))
  461. ret = -EFAULT;
  462. kfree(ops.oobbuf);
  463. break;
  464. }
  465. case MEMLOCK:
  466. {
  467. struct erase_info_user einfo;
  468. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  469. return -EFAULT;
  470. if (!mtd->lock)
  471. ret = -EOPNOTSUPP;
  472. else
  473. ret = mtd->lock(mtd, einfo.start, einfo.length);
  474. break;
  475. }
  476. case MEMUNLOCK:
  477. {
  478. struct erase_info_user einfo;
  479. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  480. return -EFAULT;
  481. if (!mtd->unlock)
  482. ret = -EOPNOTSUPP;
  483. else
  484. ret = mtd->unlock(mtd, einfo.start, einfo.length);
  485. break;
  486. }
  487. /* Legacy interface */
  488. case MEMGETOOBSEL:
  489. {
  490. struct nand_oobinfo oi;
  491. if (!mtd->ecclayout)
  492. return -EOPNOTSUPP;
  493. if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
  494. return -EINVAL;
  495. oi.useecc = MTD_NANDECC_AUTOPLACE;
  496. memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
  497. memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
  498. sizeof(oi.oobfree));
  499. oi.eccbytes = mtd->ecclayout->eccbytes;
  500. if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
  501. return -EFAULT;
  502. break;
  503. }
  504. case MEMGETBADBLOCK:
  505. {
  506. loff_t offs;
  507. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  508. return -EFAULT;
  509. if (!mtd->block_isbad)
  510. ret = -EOPNOTSUPP;
  511. else
  512. return mtd->block_isbad(mtd, offs);
  513. break;
  514. }
  515. case MEMSETBADBLOCK:
  516. {
  517. loff_t offs;
  518. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  519. return -EFAULT;
  520. if (!mtd->block_markbad)
  521. ret = -EOPNOTSUPP;
  522. else
  523. return mtd->block_markbad(mtd, offs);
  524. break;
  525. }
  526. #ifdef CONFIG_HAVE_MTD_OTP
  527. case OTPSELECT:
  528. {
  529. int mode;
  530. if (copy_from_user(&mode, argp, sizeof(int)))
  531. return -EFAULT;
  532. mfi->mode = MTD_MODE_NORMAL;
  533. ret = otp_select_filemode(mfi, mode);
  534. file->f_pos = 0;
  535. break;
  536. }
  537. case OTPGETREGIONCOUNT:
  538. case OTPGETREGIONINFO:
  539. {
  540. struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
  541. if (!buf)
  542. return -ENOMEM;
  543. ret = -EOPNOTSUPP;
  544. switch (mfi->mode) {
  545. case MTD_MODE_OTP_FACTORY:
  546. if (mtd->get_fact_prot_info)
  547. ret = mtd->get_fact_prot_info(mtd, buf, 4096);
  548. break;
  549. case MTD_MODE_OTP_USER:
  550. if (mtd->get_user_prot_info)
  551. ret = mtd->get_user_prot_info(mtd, buf, 4096);
  552. break;
  553. default:
  554. break;
  555. }
  556. if (ret >= 0) {
  557. if (cmd == OTPGETREGIONCOUNT) {
  558. int nbr = ret / sizeof(struct otp_info);
  559. ret = copy_to_user(argp, &nbr, sizeof(int));
  560. } else
  561. ret = copy_to_user(argp, buf, ret);
  562. if (ret)
  563. ret = -EFAULT;
  564. }
  565. kfree(buf);
  566. break;
  567. }
  568. case OTPLOCK:
  569. {
  570. struct otp_info oinfo;
  571. if (mfi->mode != MTD_MODE_OTP_USER)
  572. return -EINVAL;
  573. if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
  574. return -EFAULT;
  575. if (!mtd->lock_user_prot_reg)
  576. return -EOPNOTSUPP;
  577. ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
  578. break;
  579. }
  580. #endif
  581. case ECCGETLAYOUT:
  582. {
  583. if (!mtd->ecclayout)
  584. return -EOPNOTSUPP;
  585. if (copy_to_user(argp, mtd->ecclayout,
  586. sizeof(struct nand_ecclayout)))
  587. return -EFAULT;
  588. break;
  589. }
  590. case ECCGETSTATS:
  591. {
  592. if (copy_to_user(argp, &mtd->ecc_stats,
  593. sizeof(struct mtd_ecc_stats)))
  594. return -EFAULT;
  595. break;
  596. }
  597. case MTDFILEMODE:
  598. {
  599. mfi->mode = 0;
  600. switch(arg) {
  601. case MTD_MODE_OTP_FACTORY:
  602. case MTD_MODE_OTP_USER:
  603. ret = otp_select_filemode(mfi, arg);
  604. break;
  605. case MTD_MODE_RAW:
  606. if (!mtd->read_oob || !mtd->write_oob)
  607. return -EOPNOTSUPP;
  608. mfi->mode = arg;
  609. case MTD_MODE_NORMAL:
  610. break;
  611. default:
  612. ret = -EINVAL;
  613. }
  614. file->f_pos = 0;
  615. break;
  616. }
  617. default:
  618. ret = -ENOTTY;
  619. }
  620. return ret;
  621. } /* memory_ioctl */
  622. /*
  623. * try to determine where a shared mapping can be made
  624. * - only supported for NOMMU at the moment (MMU can't doesn't copy private
  625. * mappings)
  626. */
  627. #ifndef CONFIG_MMU
  628. static unsigned long mtd_get_unmapped_area(struct file *file,
  629. unsigned long addr,
  630. unsigned long len,
  631. unsigned long pgoff,
  632. unsigned long flags)
  633. {
  634. struct mtd_file_info *mfi = file->private_data;
  635. struct mtd_info *mtd = mfi->mtd;
  636. if (mtd->get_unmapped_area) {
  637. unsigned long offset;
  638. if (addr != 0)
  639. return (unsigned long) -EINVAL;
  640. if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
  641. return (unsigned long) -EINVAL;
  642. offset = pgoff << PAGE_SHIFT;
  643. if (offset > mtd->size - len)
  644. return (unsigned long) -EINVAL;
  645. return mtd->get_unmapped_area(mtd, len, offset, flags);
  646. }
  647. /* can't map directly */
  648. return (unsigned long) -ENOSYS;
  649. }
  650. #endif
  651. /*
  652. * set up a mapping for shared memory segments
  653. */
  654. static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
  655. {
  656. #ifdef CONFIG_MMU
  657. struct mtd_file_info *mfi = file->private_data;
  658. struct mtd_info *mtd = mfi->mtd;
  659. if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
  660. return 0;
  661. return -ENOSYS;
  662. #else
  663. return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
  664. #endif
  665. }
/* File operations for the raw MTD character devices (/dev/mtdN). */
static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};
  679. static int __init init_mtdchar(void)
  680. {
  681. int status;
  682. status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops);
  683. if (status < 0) {
  684. printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
  685. MTD_CHAR_MAJOR);
  686. }
  687. return status;
  688. }
/* Module teardown: release the MTD character-device major number. */
static void __exit cleanup_mtdchar(void)
{
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}
  693. module_init(init_mtdchar);
  694. module_exit(cleanup_mtdchar);
  695. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
  696. MODULE_LICENSE("GPL");
  697. MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
  698. MODULE_DESCRIPTION("Direct character-device access to MTD devices");
  699. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);