mtdchar.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069
  1. /*
  2. * Character-device access to raw MTD devices.
  3. *
  4. */
  5. #include <linux/device.h>
  6. #include <linux/fs.h>
  7. #include <linux/mm.h>
  8. #include <linux/err.h>
  9. #include <linux/init.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/sched.h>
  14. #include <linux/smp_lock.h>
  15. #include <linux/backing-dev.h>
  16. #include <linux/compat.h>
  17. #include <linux/mount.h>
  18. #include <linux/mtd/mtd.h>
  19. #include <linux/mtd/compatmac.h>
  20. #include <asm/uaccess.h>
  21. #define MTD_INODE_FS_MAGIC 0x11307854
  22. static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
/* Per-open-file state, stored in file->private_data by mtd_open(). */
struct mtd_file_info {
	struct mtd_info *mtd;		/* device this fd refers to */
	struct inode *ino;		/* pinned mtd_inodefs inode (see mtd_open) */
	enum mtd_file_modes mode;	/* normal / raw / OTP access mode (MTDFILEMODE) */
};
  32. static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
  33. {
  34. struct mtd_file_info *mfi = file->private_data;
  35. struct mtd_info *mtd = mfi->mtd;
  36. switch (orig) {
  37. case SEEK_SET:
  38. break;
  39. case SEEK_CUR:
  40. offset += file->f_pos;
  41. break;
  42. case SEEK_END:
  43. offset += mtd->size;
  44. break;
  45. default:
  46. return -EINVAL;
  47. }
  48. if (offset >= 0 && offset <= mtd->size)
  49. return file->f_pos = offset;
  50. return -EINVAL;
  51. }
  52. static int mtd_open(struct inode *inode, struct file *file)
  53. {
  54. int minor = iminor(inode);
  55. int devnum = minor >> 1;
  56. int ret = 0;
  57. struct mtd_info *mtd;
  58. struct mtd_file_info *mfi;
  59. struct inode *mtd_ino;
  60. DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
  61. /* You can't open the RO devices RW */
  62. if ((file->f_mode & FMODE_WRITE) && (minor & 1))
  63. return -EACCES;
  64. lock_kernel();
  65. mtd = get_mtd_device(NULL, devnum);
  66. if (IS_ERR(mtd)) {
  67. ret = PTR_ERR(mtd);
  68. goto out;
  69. }
  70. if (mtd->type == MTD_ABSENT) {
  71. put_mtd_device(mtd);
  72. ret = -ENODEV;
  73. goto out;
  74. }
  75. mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
  76. if (!mtd_ino) {
  77. put_mtd_device(mtd);
  78. ret = -ENOMEM;
  79. goto out;
  80. }
  81. if (mtd_ino->i_state & I_NEW) {
  82. mtd_ino->i_private = mtd;
  83. mtd_ino->i_mode = S_IFCHR;
  84. mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
  85. unlock_new_inode(mtd_ino);
  86. }
  87. file->f_mapping = mtd_ino->i_mapping;
  88. /* You can't open it RW if it's not a writeable device */
  89. if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
  90. iput(mtd_ino);
  91. put_mtd_device(mtd);
  92. ret = -EACCES;
  93. goto out;
  94. }
  95. mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
  96. if (!mfi) {
  97. iput(mtd_ino);
  98. put_mtd_device(mtd);
  99. ret = -ENOMEM;
  100. goto out;
  101. }
  102. mfi->ino = mtd_ino;
  103. mfi->mtd = mtd;
  104. file->private_data = mfi;
  105. out:
  106. unlock_kernel();
  107. return ret;
  108. } /* mtd_open */
  109. /*====================================================================*/
  110. static int mtd_close(struct inode *inode, struct file *file)
  111. {
  112. struct mtd_file_info *mfi = file->private_data;
  113. struct mtd_info *mtd = mfi->mtd;
  114. DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
  115. /* Only sync if opened RW */
  116. if ((file->f_mode & FMODE_WRITE) && mtd->sync)
  117. mtd->sync(mtd);
  118. iput(mfi->ino);
  119. put_mtd_device(mtd);
  120. file->private_data = NULL;
  121. kfree(mfi);
  122. return 0;
  123. } /* mtd_close */
  124. /* FIXME: This _really_ needs to die. In 2.5, we should lock the
  125. userspace buffer down and use it directly with readv/writev.
  126. */
  127. #define MAX_KMALLOC_SIZE 0x20000
  128. static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
  129. {
  130. struct mtd_file_info *mfi = file->private_data;
  131. struct mtd_info *mtd = mfi->mtd;
  132. size_t retlen=0;
  133. size_t total_retlen=0;
  134. int ret=0;
  135. int len;
  136. char *kbuf;
  137. DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
  138. if (*ppos + count > mtd->size)
  139. count = mtd->size - *ppos;
  140. if (!count)
  141. return 0;
  142. /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
  143. and pass them directly to the MTD functions */
  144. if (count > MAX_KMALLOC_SIZE)
  145. kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
  146. else
  147. kbuf=kmalloc(count, GFP_KERNEL);
  148. if (!kbuf)
  149. return -ENOMEM;
  150. while (count) {
  151. if (count > MAX_KMALLOC_SIZE)
  152. len = MAX_KMALLOC_SIZE;
  153. else
  154. len = count;
  155. switch (mfi->mode) {
  156. case MTD_MODE_OTP_FACTORY:
  157. ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  158. break;
  159. case MTD_MODE_OTP_USER:
  160. ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  161. break;
  162. case MTD_MODE_RAW:
  163. {
  164. struct mtd_oob_ops ops;
  165. ops.mode = MTD_OOB_RAW;
  166. ops.datbuf = kbuf;
  167. ops.oobbuf = NULL;
  168. ops.len = len;
  169. ret = mtd->read_oob(mtd, *ppos, &ops);
  170. retlen = ops.retlen;
  171. break;
  172. }
  173. default:
  174. ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
  175. }
  176. /* Nand returns -EBADMSG on ecc errors, but it returns
  177. * the data. For our userspace tools it is important
  178. * to dump areas with ecc errors !
  179. * For kernel internal usage it also might return -EUCLEAN
  180. * to signal the caller that a bitflip has occured and has
  181. * been corrected by the ECC algorithm.
  182. * Userspace software which accesses NAND this way
  183. * must be aware of the fact that it deals with NAND
  184. */
  185. if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
  186. *ppos += retlen;
  187. if (copy_to_user(buf, kbuf, retlen)) {
  188. kfree(kbuf);
  189. return -EFAULT;
  190. }
  191. else
  192. total_retlen += retlen;
  193. count -= retlen;
  194. buf += retlen;
  195. if (retlen == 0)
  196. count = 0;
  197. }
  198. else {
  199. kfree(kbuf);
  200. return ret;
  201. }
  202. }
  203. kfree(kbuf);
  204. return total_retlen;
  205. } /* mtd_read */
  206. static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
  207. {
  208. struct mtd_file_info *mfi = file->private_data;
  209. struct mtd_info *mtd = mfi->mtd;
  210. char *kbuf;
  211. size_t retlen;
  212. size_t total_retlen=0;
  213. int ret=0;
  214. int len;
  215. DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
  216. if (*ppos == mtd->size)
  217. return -ENOSPC;
  218. if (*ppos + count > mtd->size)
  219. count = mtd->size - *ppos;
  220. if (!count)
  221. return 0;
  222. if (count > MAX_KMALLOC_SIZE)
  223. kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
  224. else
  225. kbuf=kmalloc(count, GFP_KERNEL);
  226. if (!kbuf)
  227. return -ENOMEM;
  228. while (count) {
  229. if (count > MAX_KMALLOC_SIZE)
  230. len = MAX_KMALLOC_SIZE;
  231. else
  232. len = count;
  233. if (copy_from_user(kbuf, buf, len)) {
  234. kfree(kbuf);
  235. return -EFAULT;
  236. }
  237. switch (mfi->mode) {
  238. case MTD_MODE_OTP_FACTORY:
  239. ret = -EROFS;
  240. break;
  241. case MTD_MODE_OTP_USER:
  242. if (!mtd->write_user_prot_reg) {
  243. ret = -EOPNOTSUPP;
  244. break;
  245. }
  246. ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  247. break;
  248. case MTD_MODE_RAW:
  249. {
  250. struct mtd_oob_ops ops;
  251. ops.mode = MTD_OOB_RAW;
  252. ops.datbuf = kbuf;
  253. ops.oobbuf = NULL;
  254. ops.len = len;
  255. ret = mtd->write_oob(mtd, *ppos, &ops);
  256. retlen = ops.retlen;
  257. break;
  258. }
  259. default:
  260. ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
  261. }
  262. if (!ret) {
  263. *ppos += retlen;
  264. total_retlen += retlen;
  265. count -= retlen;
  266. buf += retlen;
  267. }
  268. else {
  269. kfree(kbuf);
  270. return ret;
  271. }
  272. }
  273. kfree(kbuf);
  274. return total_retlen;
  275. } /* mtd_write */
  276. /*======================================================================
  277. IOCTL calls for getting device parameters.
  278. ======================================================================*/
  279. static void mtdchar_erase_callback (struct erase_info *instr)
  280. {
  281. wake_up((wait_queue_head_t *)instr->priv);
  282. }
  283. #ifdef CONFIG_HAVE_MTD_OTP
  284. static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
  285. {
  286. struct mtd_info *mtd = mfi->mtd;
  287. int ret = 0;
  288. switch (mode) {
  289. case MTD_OTP_FACTORY:
  290. if (!mtd->read_fact_prot_reg)
  291. ret = -EOPNOTSUPP;
  292. else
  293. mfi->mode = MTD_MODE_OTP_FACTORY;
  294. break;
  295. case MTD_OTP_USER:
  296. if (!mtd->read_fact_prot_reg)
  297. ret = -EOPNOTSUPP;
  298. else
  299. mfi->mode = MTD_MODE_OTP_USER;
  300. break;
  301. default:
  302. ret = -EINVAL;
  303. case MTD_OTP_OFF:
  304. break;
  305. }
  306. return ret;
  307. }
  308. #else
  309. # define otp_select_filemode(f,m) -EOPNOTSUPP
  310. #endif
  311. static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
  312. uint64_t start, uint32_t length, void __user *ptr,
  313. uint32_t __user *retp)
  314. {
  315. struct mtd_oob_ops ops;
  316. uint32_t retlen;
  317. int ret = 0;
  318. if (!(file->f_mode & FMODE_WRITE))
  319. return -EPERM;
  320. if (length > 4096)
  321. return -EINVAL;
  322. if (!mtd->write_oob)
  323. ret = -EOPNOTSUPP;
  324. else
  325. ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
  326. if (ret)
  327. return ret;
  328. ops.ooblen = length;
  329. ops.ooboffs = start & (mtd->oobsize - 1);
  330. ops.datbuf = NULL;
  331. ops.mode = MTD_OOB_PLACE;
  332. if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
  333. return -EINVAL;
  334. ops.oobbuf = kmalloc(length, GFP_KERNEL);
  335. if (!ops.oobbuf)
  336. return -ENOMEM;
  337. if (copy_from_user(ops.oobbuf, ptr, length)) {
  338. kfree(ops.oobbuf);
  339. return -EFAULT;
  340. }
  341. start &= ~((uint64_t)mtd->oobsize - 1);
  342. ret = mtd->write_oob(mtd, start, &ops);
  343. if (ops.oobretlen > 0xFFFFFFFFU)
  344. ret = -EOVERFLOW;
  345. retlen = ops.oobretlen;
  346. if (copy_to_user(retp, &retlen, sizeof(length)))
  347. ret = -EFAULT;
  348. kfree(ops.oobbuf);
  349. return ret;
  350. }
  351. static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
  352. uint32_t length, void __user *ptr, uint32_t __user *retp)
  353. {
  354. struct mtd_oob_ops ops;
  355. int ret = 0;
  356. if (length > 4096)
  357. return -EINVAL;
  358. if (!mtd->read_oob)
  359. ret = -EOPNOTSUPP;
  360. else
  361. ret = access_ok(VERIFY_WRITE, ptr,
  362. length) ? 0 : -EFAULT;
  363. if (ret)
  364. return ret;
  365. ops.ooblen = length;
  366. ops.ooboffs = start & (mtd->oobsize - 1);
  367. ops.datbuf = NULL;
  368. ops.mode = MTD_OOB_PLACE;
  369. if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
  370. return -EINVAL;
  371. ops.oobbuf = kmalloc(length, GFP_KERNEL);
  372. if (!ops.oobbuf)
  373. return -ENOMEM;
  374. start &= ~((uint64_t)mtd->oobsize - 1);
  375. ret = mtd->read_oob(mtd, start, &ops);
  376. if (put_user(ops.oobretlen, retp))
  377. ret = -EFAULT;
  378. else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
  379. ops.oobretlen))
  380. ret = -EFAULT;
  381. kfree(ops.oobbuf);
  382. return ret;
  383. }
  384. static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
  385. {
  386. struct mtd_file_info *mfi = file->private_data;
  387. struct mtd_info *mtd = mfi->mtd;
  388. void __user *argp = (void __user *)arg;
  389. int ret = 0;
  390. u_long size;
  391. struct mtd_info_user info;
  392. DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
  393. size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
  394. if (cmd & IOC_IN) {
  395. if (!access_ok(VERIFY_READ, argp, size))
  396. return -EFAULT;
  397. }
  398. if (cmd & IOC_OUT) {
  399. if (!access_ok(VERIFY_WRITE, argp, size))
  400. return -EFAULT;
  401. }
  402. switch (cmd) {
  403. case MEMGETREGIONCOUNT:
  404. if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
  405. return -EFAULT;
  406. break;
  407. case MEMGETREGIONINFO:
  408. {
  409. uint32_t ur_idx;
  410. struct mtd_erase_region_info *kr;
  411. struct region_info_user __user *ur = argp;
  412. if (get_user(ur_idx, &(ur->regionindex)))
  413. return -EFAULT;
  414. kr = &(mtd->eraseregions[ur_idx]);
  415. if (put_user(kr->offset, &(ur->offset))
  416. || put_user(kr->erasesize, &(ur->erasesize))
  417. || put_user(kr->numblocks, &(ur->numblocks)))
  418. return -EFAULT;
  419. break;
  420. }
  421. case MEMGETINFO:
  422. info.type = mtd->type;
  423. info.flags = mtd->flags;
  424. info.size = mtd->size;
  425. info.erasesize = mtd->erasesize;
  426. info.writesize = mtd->writesize;
  427. info.oobsize = mtd->oobsize;
  428. /* The below fields are obsolete */
  429. info.ecctype = -1;
  430. info.eccsize = 0;
  431. if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
  432. return -EFAULT;
  433. break;
  434. case MEMERASE:
  435. case MEMERASE64:
  436. {
  437. struct erase_info *erase;
  438. if(!(file->f_mode & FMODE_WRITE))
  439. return -EPERM;
  440. erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
  441. if (!erase)
  442. ret = -ENOMEM;
  443. else {
  444. wait_queue_head_t waitq;
  445. DECLARE_WAITQUEUE(wait, current);
  446. init_waitqueue_head(&waitq);
  447. if (cmd == MEMERASE64) {
  448. struct erase_info_user64 einfo64;
  449. if (copy_from_user(&einfo64, argp,
  450. sizeof(struct erase_info_user64))) {
  451. kfree(erase);
  452. return -EFAULT;
  453. }
  454. erase->addr = einfo64.start;
  455. erase->len = einfo64.length;
  456. } else {
  457. struct erase_info_user einfo32;
  458. if (copy_from_user(&einfo32, argp,
  459. sizeof(struct erase_info_user))) {
  460. kfree(erase);
  461. return -EFAULT;
  462. }
  463. erase->addr = einfo32.start;
  464. erase->len = einfo32.length;
  465. }
  466. erase->mtd = mtd;
  467. erase->callback = mtdchar_erase_callback;
  468. erase->priv = (unsigned long)&waitq;
  469. /*
  470. FIXME: Allow INTERRUPTIBLE. Which means
  471. not having the wait_queue head on the stack.
  472. If the wq_head is on the stack, and we
  473. leave because we got interrupted, then the
  474. wq_head is no longer there when the
  475. callback routine tries to wake us up.
  476. */
  477. ret = mtd->erase(mtd, erase);
  478. if (!ret) {
  479. set_current_state(TASK_UNINTERRUPTIBLE);
  480. add_wait_queue(&waitq, &wait);
  481. if (erase->state != MTD_ERASE_DONE &&
  482. erase->state != MTD_ERASE_FAILED)
  483. schedule();
  484. remove_wait_queue(&waitq, &wait);
  485. set_current_state(TASK_RUNNING);
  486. ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
  487. }
  488. kfree(erase);
  489. }
  490. break;
  491. }
  492. case MEMWRITEOOB:
  493. {
  494. struct mtd_oob_buf buf;
  495. struct mtd_oob_buf __user *buf_user = argp;
  496. /* NOTE: writes return length to buf_user->length */
  497. if (copy_from_user(&buf, argp, sizeof(buf)))
  498. ret = -EFAULT;
  499. else
  500. ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
  501. buf.ptr, &buf_user->length);
  502. break;
  503. }
  504. case MEMREADOOB:
  505. {
  506. struct mtd_oob_buf buf;
  507. struct mtd_oob_buf __user *buf_user = argp;
  508. /* NOTE: writes return length to buf_user->start */
  509. if (copy_from_user(&buf, argp, sizeof(buf)))
  510. ret = -EFAULT;
  511. else
  512. ret = mtd_do_readoob(mtd, buf.start, buf.length,
  513. buf.ptr, &buf_user->start);
  514. break;
  515. }
  516. case MEMWRITEOOB64:
  517. {
  518. struct mtd_oob_buf64 buf;
  519. struct mtd_oob_buf64 __user *buf_user = argp;
  520. if (copy_from_user(&buf, argp, sizeof(buf)))
  521. ret = -EFAULT;
  522. else
  523. ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
  524. (void __user *)(uintptr_t)buf.usr_ptr,
  525. &buf_user->length);
  526. break;
  527. }
  528. case MEMREADOOB64:
  529. {
  530. struct mtd_oob_buf64 buf;
  531. struct mtd_oob_buf64 __user *buf_user = argp;
  532. if (copy_from_user(&buf, argp, sizeof(buf)))
  533. ret = -EFAULT;
  534. else
  535. ret = mtd_do_readoob(mtd, buf.start, buf.length,
  536. (void __user *)(uintptr_t)buf.usr_ptr,
  537. &buf_user->length);
  538. break;
  539. }
  540. case MEMLOCK:
  541. {
  542. struct erase_info_user einfo;
  543. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  544. return -EFAULT;
  545. if (!mtd->lock)
  546. ret = -EOPNOTSUPP;
  547. else
  548. ret = mtd->lock(mtd, einfo.start, einfo.length);
  549. break;
  550. }
  551. case MEMUNLOCK:
  552. {
  553. struct erase_info_user einfo;
  554. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  555. return -EFAULT;
  556. if (!mtd->unlock)
  557. ret = -EOPNOTSUPP;
  558. else
  559. ret = mtd->unlock(mtd, einfo.start, einfo.length);
  560. break;
  561. }
  562. /* Legacy interface */
  563. case MEMGETOOBSEL:
  564. {
  565. struct nand_oobinfo oi;
  566. if (!mtd->ecclayout)
  567. return -EOPNOTSUPP;
  568. if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
  569. return -EINVAL;
  570. oi.useecc = MTD_NANDECC_AUTOPLACE;
  571. memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
  572. memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
  573. sizeof(oi.oobfree));
  574. oi.eccbytes = mtd->ecclayout->eccbytes;
  575. if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
  576. return -EFAULT;
  577. break;
  578. }
  579. case MEMGETBADBLOCK:
  580. {
  581. loff_t offs;
  582. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  583. return -EFAULT;
  584. if (!mtd->block_isbad)
  585. ret = -EOPNOTSUPP;
  586. else
  587. return mtd->block_isbad(mtd, offs);
  588. break;
  589. }
  590. case MEMSETBADBLOCK:
  591. {
  592. loff_t offs;
  593. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  594. return -EFAULT;
  595. if (!mtd->block_markbad)
  596. ret = -EOPNOTSUPP;
  597. else
  598. return mtd->block_markbad(mtd, offs);
  599. break;
  600. }
  601. #ifdef CONFIG_HAVE_MTD_OTP
  602. case OTPSELECT:
  603. {
  604. int mode;
  605. if (copy_from_user(&mode, argp, sizeof(int)))
  606. return -EFAULT;
  607. mfi->mode = MTD_MODE_NORMAL;
  608. ret = otp_select_filemode(mfi, mode);
  609. file->f_pos = 0;
  610. break;
  611. }
  612. case OTPGETREGIONCOUNT:
  613. case OTPGETREGIONINFO:
  614. {
  615. struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
  616. if (!buf)
  617. return -ENOMEM;
  618. ret = -EOPNOTSUPP;
  619. switch (mfi->mode) {
  620. case MTD_MODE_OTP_FACTORY:
  621. if (mtd->get_fact_prot_info)
  622. ret = mtd->get_fact_prot_info(mtd, buf, 4096);
  623. break;
  624. case MTD_MODE_OTP_USER:
  625. if (mtd->get_user_prot_info)
  626. ret = mtd->get_user_prot_info(mtd, buf, 4096);
  627. break;
  628. default:
  629. break;
  630. }
  631. if (ret >= 0) {
  632. if (cmd == OTPGETREGIONCOUNT) {
  633. int nbr = ret / sizeof(struct otp_info);
  634. ret = copy_to_user(argp, &nbr, sizeof(int));
  635. } else
  636. ret = copy_to_user(argp, buf, ret);
  637. if (ret)
  638. ret = -EFAULT;
  639. }
  640. kfree(buf);
  641. break;
  642. }
  643. case OTPLOCK:
  644. {
  645. struct otp_info oinfo;
  646. if (mfi->mode != MTD_MODE_OTP_USER)
  647. return -EINVAL;
  648. if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
  649. return -EFAULT;
  650. if (!mtd->lock_user_prot_reg)
  651. return -EOPNOTSUPP;
  652. ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
  653. break;
  654. }
  655. #endif
  656. case ECCGETLAYOUT:
  657. {
  658. if (!mtd->ecclayout)
  659. return -EOPNOTSUPP;
  660. if (copy_to_user(argp, mtd->ecclayout,
  661. sizeof(struct nand_ecclayout)))
  662. return -EFAULT;
  663. break;
  664. }
  665. case ECCGETSTATS:
  666. {
  667. if (copy_to_user(argp, &mtd->ecc_stats,
  668. sizeof(struct mtd_ecc_stats)))
  669. return -EFAULT;
  670. break;
  671. }
  672. case MTDFILEMODE:
  673. {
  674. mfi->mode = 0;
  675. switch(arg) {
  676. case MTD_MODE_OTP_FACTORY:
  677. case MTD_MODE_OTP_USER:
  678. ret = otp_select_filemode(mfi, arg);
  679. break;
  680. case MTD_MODE_RAW:
  681. if (!mtd->read_oob || !mtd->write_oob)
  682. return -EOPNOTSUPP;
  683. mfi->mode = arg;
  684. case MTD_MODE_NORMAL:
  685. break;
  686. default:
  687. ret = -EINVAL;
  688. }
  689. file->f_pos = 0;
  690. break;
  691. }
  692. default:
  693. ret = -ENOTTY;
  694. }
  695. return ret;
  696. } /* memory_ioctl */
  697. static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
  698. {
  699. int ret;
  700. lock_kernel();
  701. ret = mtd_ioctl(file, cmd, arg);
  702. unlock_kernel();
  703. return ret;
  704. }
  705. #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct mtd_oob_buf, for compat ioctls. */
struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

/* Compat command numbers: same 'M'/3 and 'M'/4 as the native
 * MEMWRITEOOB/MEMREADOOB but with the 32-bit struct size encoded. */
#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
  713. static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
  714. unsigned long arg)
  715. {
  716. struct mtd_file_info *mfi = file->private_data;
  717. struct mtd_info *mtd = mfi->mtd;
  718. void __user *argp = compat_ptr(arg);
  719. int ret = 0;
  720. lock_kernel();
  721. switch (cmd) {
  722. case MEMWRITEOOB32:
  723. {
  724. struct mtd_oob_buf32 buf;
  725. struct mtd_oob_buf32 __user *buf_user = argp;
  726. if (copy_from_user(&buf, argp, sizeof(buf)))
  727. ret = -EFAULT;
  728. else
  729. ret = mtd_do_writeoob(file, mtd, buf.start,
  730. buf.length, compat_ptr(buf.ptr),
  731. &buf_user->length);
  732. break;
  733. }
  734. case MEMREADOOB32:
  735. {
  736. struct mtd_oob_buf32 buf;
  737. struct mtd_oob_buf32 __user *buf_user = argp;
  738. /* NOTE: writes return length to buf->start */
  739. if (copy_from_user(&buf, argp, sizeof(buf)))
  740. ret = -EFAULT;
  741. else
  742. ret = mtd_do_readoob(mtd, buf.start,
  743. buf.length, compat_ptr(buf.ptr),
  744. &buf_user->start);
  745. break;
  746. }
  747. default:
  748. ret = mtd_ioctl(file, cmd, (unsigned long)argp);
  749. }
  750. unlock_kernel();
  751. return ret;
  752. }
  753. #endif /* CONFIG_COMPAT */
/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (with an MMU, private
 *   mappings are copied, so direct mapping can't be supported)
 */
  759. #ifndef CONFIG_MMU
  760. static unsigned long mtd_get_unmapped_area(struct file *file,
  761. unsigned long addr,
  762. unsigned long len,
  763. unsigned long pgoff,
  764. unsigned long flags)
  765. {
  766. struct mtd_file_info *mfi = file->private_data;
  767. struct mtd_info *mtd = mfi->mtd;
  768. if (mtd->get_unmapped_area) {
  769. unsigned long offset;
  770. if (addr != 0)
  771. return (unsigned long) -EINVAL;
  772. if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
  773. return (unsigned long) -EINVAL;
  774. offset = pgoff << PAGE_SHIFT;
  775. if (offset > mtd->size - len)
  776. return (unsigned long) -EINVAL;
  777. return mtd->get_unmapped_area(mtd, len, offset, flags);
  778. }
  779. /* can't map directly */
  780. return (unsigned long) -ENOSYS;
  781. }
  782. #endif
  783. /*
  784. * set up a mapping for shared memory segments
  785. */
  786. static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
  787. {
  788. #ifdef CONFIG_MMU
  789. struct mtd_file_info *mfi = file->private_data;
  790. struct mtd_info *mtd = mfi->mtd;
  791. if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
  792. return 0;
  793. return -ENOSYS;
  794. #else
  795. return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
  796. #endif
  797. }
/* File operations for the /dev/mtd* character devices. */
static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.unlocked_ioctl	= mtd_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtd_compat_ioctl,
#endif
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};
  814. static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
  815. const char *dev_name, void *data,
  816. struct vfsmount *mnt)
  817. {
  818. return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
  819. mnt);
  820. }
/* Pseudo-filesystem backing the per-device inodes (kern_mount'ed in
 * init_mtdchar, never user-mountable in practice). */
static struct file_system_type mtd_inodefs_type = {
	.name = "mtd_inodefs",
	.get_sb = mtd_inodefs_get_sb,
	.kill_sb = kill_anon_super,
};
/* Nothing to do at device-add time: the backing inode is created lazily
 * by mtd_open().  Present because struct mtd_notifier needs both hooks. */
static void mtdchar_notify_add(struct mtd_info *mtd)
{
}
  829. static void mtdchar_notify_remove(struct mtd_info *mtd)
  830. {
  831. struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
  832. if (mtd_ino) {
  833. /* Destroy the inode if it exists */
  834. mtd_ino->i_nlink = 0;
  835. iput(mtd_ino);
  836. }
  837. }
/* MTD core add/remove notifications (registered in init_mtdchar). */
static struct mtd_notifier mtdchar_notifier = {
	.add = mtdchar_notify_add,
	.remove = mtdchar_notify_remove,
};
  842. static int __init init_mtdchar(void)
  843. {
  844. int ret;
  845. ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
  846. "mtd", &mtd_fops);
  847. if (ret < 0) {
  848. pr_notice("Can't allocate major number %d for "
  849. "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
  850. return ret;
  851. }
  852. ret = register_filesystem(&mtd_inodefs_type);
  853. if (ret) {
  854. pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
  855. goto err_unregister_chdev;
  856. }
  857. mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
  858. if (IS_ERR(mtd_inode_mnt)) {
  859. ret = PTR_ERR(mtd_inode_mnt);
  860. pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
  861. goto err_unregister_filesystem;
  862. }
  863. register_mtd_user(&mtdchar_notifier);
  864. return ret;
  865. err_unregister_filesystem:
  866. unregister_filesystem(&mtd_inodefs_type);
  867. err_unregister_chdev:
  868. __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
  869. return ret;
  870. }
/* Module unload: tear down strictly in reverse order of init_mtdchar(). */
static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&mtdchar_notifier);
	mntput(mtd_inode_mnt);		/* drop the kern_mount() reference */
	unregister_filesystem(&mtd_inodefs_type);
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
  878. module_init(init_mtdchar);
  879. module_exit(cleanup_mtdchar);
  880. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
  881. MODULE_LICENSE("GPL");
  882. MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
  883. MODULE_DESCRIPTION("Direct character-device access to MTD devices");
  884. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);