/* drivers/mtd/mtdchar.c */
  1. /*
  2. * Character-device access to raw MTD devices.
  3. *
  4. */
  5. #include <linux/device.h>
  6. #include <linux/fs.h>
  7. #include <linux/mm.h>
  8. #include <linux/err.h>
  9. #include <linux/init.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/sched.h>
  14. #include <linux/smp_lock.h>
  15. #include <linux/backing-dev.h>
  16. #include <linux/compat.h>
  17. #include <linux/mount.h>
  18. #include <linux/mtd/mtd.h>
  19. #include <linux/mtd/compatmac.h>
  20. #include <asm/uaccess.h>
  21. #define MTD_INODE_FS_MAGIC 0x11307854
  22. static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;		/* device this open refers to (ref held) */
	struct inode *ino;		/* backing inode on the mtd_inodefs pseudo fs */
	enum mtd_file_modes mode;	/* normal / OTP factory / OTP user / raw */
};
  32. static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
  33. {
  34. struct mtd_file_info *mfi = file->private_data;
  35. struct mtd_info *mtd = mfi->mtd;
  36. switch (orig) {
  37. case SEEK_SET:
  38. break;
  39. case SEEK_CUR:
  40. offset += file->f_pos;
  41. break;
  42. case SEEK_END:
  43. offset += mtd->size;
  44. break;
  45. default:
  46. return -EINVAL;
  47. }
  48. if (offset >= 0 && offset <= mtd->size)
  49. return file->f_pos = offset;
  50. return -EINVAL;
  51. }
/*
 * Open a raw MTD character device.  Even minors are the RW node, odd
 * minors the RO node of the same device (devnum = minor >> 1).
 * Takes a reference on the mtd device and on a backing inode from the
 * mtd_inodefs pseudo filesystem; both are dropped in mtd_close().
 */
static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;	/* strip the RO/RW bit */
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;
	struct inode *mtd_ino;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	lock_kernel();
	mtd = get_mtd_device(NULL, devnum);	/* takes a device reference */

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		put_mtd_device(mtd);
		ret = -ENODEV;
		goto out;
	}

	/* One pseudo-fs inode per device, keyed by device number. */
	mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
	if (!mtd_ino) {
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	if (mtd_ino->i_state & I_NEW) {
		/* First open of this device: initialise the fresh inode. */
		mtd_ino->i_private = mtd;
		mtd_ino->i_mode = S_IFCHR;
		mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
		unlock_new_inode(mtd_ino);
	}
	file->f_mapping = mtd_ino->i_mapping;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -EACCES;
		goto out;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	mfi->ino = mtd_ino;
	mfi->mtd = mtd;
	file->private_data = mfi;

out:
	unlock_kernel();
	return ret;
} /* mtd_open */
  109. /*====================================================================*/
  110. static int mtd_close(struct inode *inode, struct file *file)
  111. {
  112. struct mtd_file_info *mfi = file->private_data;
  113. struct mtd_info *mtd = mfi->mtd;
  114. DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
  115. /* Only sync if opened RW */
  116. if ((file->f_mode & FMODE_WRITE) && mtd->sync)
  117. mtd->sync(mtd);
  118. iput(mfi->ino);
  119. put_mtd_device(mtd);
  120. file->private_data = NULL;
  121. kfree(mfi);
  122. return 0;
  123. } /* mtd_close */
  124. /* FIXME: This _really_ needs to die. In 2.5, we should lock the
  125. userspace buffer down and use it directly with readv/writev.
  126. */
  127. #define MAX_KMALLOC_SIZE 0x20000
/*
 * Read from the device through a bounce buffer, honouring the per-open
 * file mode (normal, OTP factory/user, or raw-with-OOB).  Returns the
 * number of bytes copied to userspace, or a negative errno.
 */
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	/* Clamp the request to the end of the device. */
	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	/* Bounce buffer of at most MAX_KMALLOC_SIZE; larger reads loop. */
	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		/* Dispatch on the access mode selected via ioctl. */
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ecc errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ecc errors !
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			/* A zero-length transfer means end of data: stop looping. */
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */
/*
 * Write to the device through a bounce buffer, honouring the per-open
 * file mode.  Returns bytes written or a negative errno; writing at
 * exactly the device end yields -ENOSPC.
 */
static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	/* Clamp the request to the end of the device. */
	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* Bounce buffer of at most MAX_KMALLOC_SIZE; larger writes loop. */
	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		/* Dispatch on the access mode selected via ioctl. */
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			/* Factory OTP is read-only by definition. */
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */
  276. /*======================================================================
  277. IOCTL calls for getting device parameters.
  278. ======================================================================*/
  279. static void mtdchar_erase_callback (struct erase_info *instr)
  280. {
  281. wake_up((wait_queue_head_t *)instr->priv);
  282. }
  283. #ifdef CONFIG_HAVE_MTD_OTP
  284. static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
  285. {
  286. struct mtd_info *mtd = mfi->mtd;
  287. int ret = 0;
  288. switch (mode) {
  289. case MTD_OTP_FACTORY:
  290. if (!mtd->read_fact_prot_reg)
  291. ret = -EOPNOTSUPP;
  292. else
  293. mfi->mode = MTD_MODE_OTP_FACTORY;
  294. break;
  295. case MTD_OTP_USER:
  296. if (!mtd->read_fact_prot_reg)
  297. ret = -EOPNOTSUPP;
  298. else
  299. mfi->mode = MTD_MODE_OTP_USER;
  300. break;
  301. default:
  302. ret = -EINVAL;
  303. case MTD_OTP_OFF:
  304. break;
  305. }
  306. return ret;
  307. }
  308. #else
  309. # define otp_select_filemode(f,m) -EOPNOTSUPP
  310. #endif
  311. static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
  312. uint64_t start, uint32_t length, void __user *ptr,
  313. uint32_t __user *retp)
  314. {
  315. struct mtd_oob_ops ops;
  316. uint32_t retlen;
  317. int ret = 0;
  318. if (!(file->f_mode & FMODE_WRITE))
  319. return -EPERM;
  320. if (length > 4096)
  321. return -EINVAL;
  322. if (!mtd->write_oob)
  323. ret = -EOPNOTSUPP;
  324. else
  325. ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
  326. if (ret)
  327. return ret;
  328. ops.ooblen = length;
  329. ops.ooboffs = start & (mtd->oobsize - 1);
  330. ops.datbuf = NULL;
  331. ops.mode = MTD_OOB_PLACE;
  332. if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
  333. return -EINVAL;
  334. ops.oobbuf = kmalloc(length, GFP_KERNEL);
  335. if (!ops.oobbuf)
  336. return -ENOMEM;
  337. if (copy_from_user(ops.oobbuf, ptr, length)) {
  338. kfree(ops.oobbuf);
  339. return -EFAULT;
  340. }
  341. start &= ~((uint64_t)mtd->oobsize - 1);
  342. ret = mtd->write_oob(mtd, start, &ops);
  343. if (ops.oobretlen > 0xFFFFFFFFU)
  344. ret = -EOVERFLOW;
  345. retlen = ops.oobretlen;
  346. if (copy_to_user(retp, &retlen, sizeof(length)))
  347. ret = -EFAULT;
  348. kfree(ops.oobbuf);
  349. return ret;
  350. }
/*
 * Read OOB data at the OOB offset implied by @start and copy it to
 * userspace at @ptr; the byte count actually read is stored to @retp.
 *
 * Returns 0 on success or a negative errno; @length is capped at 4096.
 */
static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
	uint32_t length, void __user *ptr, uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!mtd->read_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_WRITE, ptr,
				length) ? 0 : -EFAULT;
	if (ret)
		return ret;

	ops.ooblen = length;
	/* Split @start into an OOB offset within the page ... */
	ops.ooboffs = start & (mtd->oobsize - 1);
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	/* ... and a page-aligned device address. */
	start &= ~((uint64_t)mtd->oobsize - 1);
	ret = mtd->read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}
  384. static int mtd_ioctl(struct inode *inode, struct file *file,
  385. u_int cmd, u_long arg)
  386. {
  387. struct mtd_file_info *mfi = file->private_data;
  388. struct mtd_info *mtd = mfi->mtd;
  389. void __user *argp = (void __user *)arg;
  390. int ret = 0;
  391. u_long size;
  392. struct mtd_info_user info;
  393. DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
  394. size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
  395. if (cmd & IOC_IN) {
  396. if (!access_ok(VERIFY_READ, argp, size))
  397. return -EFAULT;
  398. }
  399. if (cmd & IOC_OUT) {
  400. if (!access_ok(VERIFY_WRITE, argp, size))
  401. return -EFAULT;
  402. }
  403. switch (cmd) {
  404. case MEMGETREGIONCOUNT:
  405. if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
  406. return -EFAULT;
  407. break;
  408. case MEMGETREGIONINFO:
  409. {
  410. uint32_t ur_idx;
  411. struct mtd_erase_region_info *kr;
  412. struct region_info_user __user *ur = argp;
  413. if (get_user(ur_idx, &(ur->regionindex)))
  414. return -EFAULT;
  415. kr = &(mtd->eraseregions[ur_idx]);
  416. if (put_user(kr->offset, &(ur->offset))
  417. || put_user(kr->erasesize, &(ur->erasesize))
  418. || put_user(kr->numblocks, &(ur->numblocks)))
  419. return -EFAULT;
  420. break;
  421. }
  422. case MEMGETINFO:
  423. info.type = mtd->type;
  424. info.flags = mtd->flags;
  425. info.size = mtd->size;
  426. info.erasesize = mtd->erasesize;
  427. info.writesize = mtd->writesize;
  428. info.oobsize = mtd->oobsize;
  429. /* The below fields are obsolete */
  430. info.ecctype = -1;
  431. info.eccsize = 0;
  432. if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
  433. return -EFAULT;
  434. break;
  435. case MEMERASE:
  436. case MEMERASE64:
  437. {
  438. struct erase_info *erase;
  439. if(!(file->f_mode & FMODE_WRITE))
  440. return -EPERM;
  441. erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
  442. if (!erase)
  443. ret = -ENOMEM;
  444. else {
  445. wait_queue_head_t waitq;
  446. DECLARE_WAITQUEUE(wait, current);
  447. init_waitqueue_head(&waitq);
  448. if (cmd == MEMERASE64) {
  449. struct erase_info_user64 einfo64;
  450. if (copy_from_user(&einfo64, argp,
  451. sizeof(struct erase_info_user64))) {
  452. kfree(erase);
  453. return -EFAULT;
  454. }
  455. erase->addr = einfo64.start;
  456. erase->len = einfo64.length;
  457. } else {
  458. struct erase_info_user einfo32;
  459. if (copy_from_user(&einfo32, argp,
  460. sizeof(struct erase_info_user))) {
  461. kfree(erase);
  462. return -EFAULT;
  463. }
  464. erase->addr = einfo32.start;
  465. erase->len = einfo32.length;
  466. }
  467. erase->mtd = mtd;
  468. erase->callback = mtdchar_erase_callback;
  469. erase->priv = (unsigned long)&waitq;
  470. /*
  471. FIXME: Allow INTERRUPTIBLE. Which means
  472. not having the wait_queue head on the stack.
  473. If the wq_head is on the stack, and we
  474. leave because we got interrupted, then the
  475. wq_head is no longer there when the
  476. callback routine tries to wake us up.
  477. */
  478. ret = mtd->erase(mtd, erase);
  479. if (!ret) {
  480. set_current_state(TASK_UNINTERRUPTIBLE);
  481. add_wait_queue(&waitq, &wait);
  482. if (erase->state != MTD_ERASE_DONE &&
  483. erase->state != MTD_ERASE_FAILED)
  484. schedule();
  485. remove_wait_queue(&waitq, &wait);
  486. set_current_state(TASK_RUNNING);
  487. ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
  488. }
  489. kfree(erase);
  490. }
  491. break;
  492. }
  493. case MEMWRITEOOB:
  494. {
  495. struct mtd_oob_buf buf;
  496. struct mtd_oob_buf __user *buf_user = argp;
  497. /* NOTE: writes return length to buf_user->length */
  498. if (copy_from_user(&buf, argp, sizeof(buf)))
  499. ret = -EFAULT;
  500. else
  501. ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
  502. buf.ptr, &buf_user->length);
  503. break;
  504. }
  505. case MEMREADOOB:
  506. {
  507. struct mtd_oob_buf buf;
  508. struct mtd_oob_buf __user *buf_user = argp;
  509. /* NOTE: writes return length to buf_user->start */
  510. if (copy_from_user(&buf, argp, sizeof(buf)))
  511. ret = -EFAULT;
  512. else
  513. ret = mtd_do_readoob(mtd, buf.start, buf.length,
  514. buf.ptr, &buf_user->start);
  515. break;
  516. }
  517. case MEMWRITEOOB64:
  518. {
  519. struct mtd_oob_buf64 buf;
  520. struct mtd_oob_buf64 __user *buf_user = argp;
  521. if (copy_from_user(&buf, argp, sizeof(buf)))
  522. ret = -EFAULT;
  523. else
  524. ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
  525. (void __user *)(uintptr_t)buf.usr_ptr,
  526. &buf_user->length);
  527. break;
  528. }
  529. case MEMREADOOB64:
  530. {
  531. struct mtd_oob_buf64 buf;
  532. struct mtd_oob_buf64 __user *buf_user = argp;
  533. if (copy_from_user(&buf, argp, sizeof(buf)))
  534. ret = -EFAULT;
  535. else
  536. ret = mtd_do_readoob(mtd, buf.start, buf.length,
  537. (void __user *)(uintptr_t)buf.usr_ptr,
  538. &buf_user->length);
  539. break;
  540. }
  541. case MEMLOCK:
  542. {
  543. struct erase_info_user einfo;
  544. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  545. return -EFAULT;
  546. if (!mtd->lock)
  547. ret = -EOPNOTSUPP;
  548. else
  549. ret = mtd->lock(mtd, einfo.start, einfo.length);
  550. break;
  551. }
  552. case MEMUNLOCK:
  553. {
  554. struct erase_info_user einfo;
  555. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  556. return -EFAULT;
  557. if (!mtd->unlock)
  558. ret = -EOPNOTSUPP;
  559. else
  560. ret = mtd->unlock(mtd, einfo.start, einfo.length);
  561. break;
  562. }
  563. /* Legacy interface */
  564. case MEMGETOOBSEL:
  565. {
  566. struct nand_oobinfo oi;
  567. if (!mtd->ecclayout)
  568. return -EOPNOTSUPP;
  569. if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
  570. return -EINVAL;
  571. oi.useecc = MTD_NANDECC_AUTOPLACE;
  572. memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
  573. memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
  574. sizeof(oi.oobfree));
  575. oi.eccbytes = mtd->ecclayout->eccbytes;
  576. if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
  577. return -EFAULT;
  578. break;
  579. }
  580. case MEMGETBADBLOCK:
  581. {
  582. loff_t offs;
  583. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  584. return -EFAULT;
  585. if (!mtd->block_isbad)
  586. ret = -EOPNOTSUPP;
  587. else
  588. return mtd->block_isbad(mtd, offs);
  589. break;
  590. }
  591. case MEMSETBADBLOCK:
  592. {
  593. loff_t offs;
  594. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  595. return -EFAULT;
  596. if (!mtd->block_markbad)
  597. ret = -EOPNOTSUPP;
  598. else
  599. return mtd->block_markbad(mtd, offs);
  600. break;
  601. }
  602. #ifdef CONFIG_HAVE_MTD_OTP
  603. case OTPSELECT:
  604. {
  605. int mode;
  606. if (copy_from_user(&mode, argp, sizeof(int)))
  607. return -EFAULT;
  608. mfi->mode = MTD_MODE_NORMAL;
  609. ret = otp_select_filemode(mfi, mode);
  610. file->f_pos = 0;
  611. break;
  612. }
  613. case OTPGETREGIONCOUNT:
  614. case OTPGETREGIONINFO:
  615. {
  616. struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
  617. if (!buf)
  618. return -ENOMEM;
  619. ret = -EOPNOTSUPP;
  620. switch (mfi->mode) {
  621. case MTD_MODE_OTP_FACTORY:
  622. if (mtd->get_fact_prot_info)
  623. ret = mtd->get_fact_prot_info(mtd, buf, 4096);
  624. break;
  625. case MTD_MODE_OTP_USER:
  626. if (mtd->get_user_prot_info)
  627. ret = mtd->get_user_prot_info(mtd, buf, 4096);
  628. break;
  629. default:
  630. break;
  631. }
  632. if (ret >= 0) {
  633. if (cmd == OTPGETREGIONCOUNT) {
  634. int nbr = ret / sizeof(struct otp_info);
  635. ret = copy_to_user(argp, &nbr, sizeof(int));
  636. } else
  637. ret = copy_to_user(argp, buf, ret);
  638. if (ret)
  639. ret = -EFAULT;
  640. }
  641. kfree(buf);
  642. break;
  643. }
  644. case OTPLOCK:
  645. {
  646. struct otp_info oinfo;
  647. if (mfi->mode != MTD_MODE_OTP_USER)
  648. return -EINVAL;
  649. if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
  650. return -EFAULT;
  651. if (!mtd->lock_user_prot_reg)
  652. return -EOPNOTSUPP;
  653. ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
  654. break;
  655. }
  656. #endif
  657. case ECCGETLAYOUT:
  658. {
  659. if (!mtd->ecclayout)
  660. return -EOPNOTSUPP;
  661. if (copy_to_user(argp, mtd->ecclayout,
  662. sizeof(struct nand_ecclayout)))
  663. return -EFAULT;
  664. break;
  665. }
  666. case ECCGETSTATS:
  667. {
  668. if (copy_to_user(argp, &mtd->ecc_stats,
  669. sizeof(struct mtd_ecc_stats)))
  670. return -EFAULT;
  671. break;
  672. }
  673. case MTDFILEMODE:
  674. {
  675. mfi->mode = 0;
  676. switch(arg) {
  677. case MTD_MODE_OTP_FACTORY:
  678. case MTD_MODE_OTP_USER:
  679. ret = otp_select_filemode(mfi, arg);
  680. break;
  681. case MTD_MODE_RAW:
  682. if (!mtd->read_oob || !mtd->write_oob)
  683. return -EOPNOTSUPP;
  684. mfi->mode = arg;
  685. case MTD_MODE_NORMAL:
  686. break;
  687. default:
  688. ret = -EINVAL;
  689. }
  690. file->f_pos = 0;
  691. break;
  692. }
  693. default:
  694. ret = -ENOTTY;
  695. }
  696. return ret;
  697. } /* memory_ioctl */
  698. #ifdef CONFIG_COMPAT
/* 32-bit layout of struct mtd_oob_buf, for the compat ioctl path. */
struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

/* Compat ioctl numbers computed from the 32-bit structure size. */
#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)
/*
 * 32-bit compat ioctl entry point: translate the 32-bit OOB buffer
 * commands, and forward everything else to mtd_ioctl() with the
 * pointer converted via compat_ptr().
 */
static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	lock_kernel();

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}
	default:
		ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
	}

	unlock_kernel();

	return ret;
}
  747. #endif /* CONFIG_COMPAT */
  748. /*
  749. * try to determine where a shared mapping can be made
  750. * - only supported for NOMMU at the moment (MMU can't doesn't copy private
  751. * mappings)
  752. */
  753. #ifndef CONFIG_MMU
  754. static unsigned long mtd_get_unmapped_area(struct file *file,
  755. unsigned long addr,
  756. unsigned long len,
  757. unsigned long pgoff,
  758. unsigned long flags)
  759. {
  760. struct mtd_file_info *mfi = file->private_data;
  761. struct mtd_info *mtd = mfi->mtd;
  762. if (mtd->get_unmapped_area) {
  763. unsigned long offset;
  764. if (addr != 0)
  765. return (unsigned long) -EINVAL;
  766. if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
  767. return (unsigned long) -EINVAL;
  768. offset = pgoff << PAGE_SHIFT;
  769. if (offset > mtd->size - len)
  770. return (unsigned long) -EINVAL;
  771. return mtd->get_unmapped_area(mtd, len, offset, flags);
  772. }
  773. /* can't map directly */
  774. return (unsigned long) -ENOSYS;
  775. }
  776. #endif
  777. /*
  778. * set up a mapping for shared memory segments
  779. */
  780. static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
  781. {
  782. #ifdef CONFIG_MMU
  783. struct mtd_file_info *mfi = file->private_data;
  784. struct mtd_info *mtd = mfi->mtd;
  785. if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
  786. return 0;
  787. return -ENOSYS;
  788. #else
  789. return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
  790. #endif
  791. }
/* File operations for the /dev/mtdN character devices. */
static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtd_compat_ioctl,
#endif
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	/* Direct-mapping hint is only meaningful without an MMU. */
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};
/* Mount callback for the internal pseudo filesystem whose inodes back
 * the character devices (see mtd_open()). */
static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
			      const char *dev_name, void *data,
			      struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
			     mnt);
}
/* Kernel-internal filesystem type; mounted once at module init. */
static struct file_system_type mtd_inodefs_type = {
	.name = "mtd_inodefs",
	.get_sb = mtd_inodefs_get_sb,
	.kill_sb = kill_anon_super,
};
/* Nothing to do when a device appears: the backing inode is created
 * lazily on first open. */
static void mtdchar_notify_add(struct mtd_info *mtd)
{
}
/* Device removal: tear down the backing inode if one was created. */
static void mtdchar_notify_remove(struct mtd_info *mtd)
{
	struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);

	if (mtd_ino) {
		/* Destroy the inode if it exists */
		mtd_ino->i_nlink = 0;
		iput(mtd_ino);
	}
}
/* Hooks invoked by the MTD core on device add/remove. */
static struct mtd_notifier mtdchar_notifier = {
	.add = mtdchar_notify_add,
	.remove = mtdchar_notify_remove,
};
/*
 * Module init: register the char device major, then register and mount
 * the mtd_inodefs pseudo filesystem, then hook into the MTD core.
 * Unwinds in reverse order on failure (goto-cleanup).
 */
static int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_notice("Can't allocate major number %d for "
			  "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
		return ret;
	}

	ret = register_filesystem(&mtd_inodefs_type);
	if (ret) {
		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_chdev;
	}

	mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
	if (IS_ERR(mtd_inode_mnt)) {
		ret = PTR_ERR(mtd_inode_mnt);
		pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_filesystem;
	}
	register_mtd_user(&mtdchar_notifier);

	return ret;

err_unregister_filesystem:
	unregister_filesystem(&mtd_inodefs_type);
err_unregister_chdev:
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
	return ret;
}
/* Module exit: undo init_mtdchar() in reverse order. */
static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&mtdchar_notifier);
	mntput(mtd_inode_mnt);
	unregister_filesystem(&mtd_inodefs_type);
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
  872. module_init(init_mtdchar);
  873. module_exit(cleanup_mtdchar);
  874. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
  875. MODULE_LICENSE("GPL");
  876. MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
  877. MODULE_DESCRIPTION("Direct character-device access to MTD devices");
  878. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);