  1. /*
  2. * Character-device access to raw MTD devices.
  3. *
  4. */
  5. #include <linux/device.h>
  6. #include <linux/fs.h>
  7. #include <linux/mm.h>
  8. #include <linux/err.h>
  9. #include <linux/init.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/sched.h>
  14. #include <linux/smp_lock.h>
  15. #include <linux/backing-dev.h>
  16. #include <linux/compat.h>
  17. #include <linux/mount.h>
  18. #include <linux/mtd/mtd.h>
  19. #include <linux/mtd/compatmac.h>
  20. #include <asm/uaccess.h>
  21. #define MTD_INODE_FS_MAGIC 0x11307854
  22. static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;		/* underlying device, refcounted via get_mtd_device() */
	struct inode *ino;		/* per-device inode from the mtd_inodefs pseudo-fs */
	enum mtd_file_modes mode;	/* normal / OTP factory / OTP user / raw access */
};
  32. static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
  33. {
  34. struct mtd_file_info *mfi = file->private_data;
  35. struct mtd_info *mtd = mfi->mtd;
  36. switch (orig) {
  37. case SEEK_SET:
  38. break;
  39. case SEEK_CUR:
  40. offset += file->f_pos;
  41. break;
  42. case SEEK_END:
  43. offset += mtd->size;
  44. break;
  45. default:
  46. return -EINVAL;
  47. }
  48. if (offset >= 0 && offset <= mtd->size)
  49. return file->f_pos = offset;
  50. return -EINVAL;
  51. }
/*
 * Open a /dev/mtdX device node.  Even minors are read/write, odd minors
 * (/dev/mtdXro) are read-only.  Takes a reference on the MTD device and
 * on a per-device inode in the mtd_inodefs pseudo-fs so all openers of
 * the same device share one page-cache mapping.
 */
static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;	/* two minors per device: rw + ro */
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;
	struct inode *mtd_ino;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	lock_kernel();
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		put_mtd_device(mtd);
		ret = -ENODEV;
		goto out;
	}

	mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
	if (!mtd_ino) {
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	if (mtd_ino->i_state & I_NEW) {
		/* First opener of this device: initialise the inode. */
		mtd_ino->i_private = mtd;
		mtd_ino->i_mode = S_IFCHR;
		mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
		unlock_new_inode(mtd_ino);
	}
	file->f_mapping = mtd_ino->i_mapping;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -EACCES;
		goto out;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	mfi->ino = mtd_ino;
	mfi->mtd = mtd;
	file->private_data = mfi;

out:
	unlock_kernel();
	return ret;
} /* mtd_open */
  109. /*====================================================================*/
/*
 * Release an open mtd file: sync the device if it was opened for
 * writing, then drop the inode and device references taken in
 * mtd_open() and free the per-open state.
 */
static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE) && mtd->sync)
		mtd->sync(mtd);

	iput(mfi->ino);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */
  124. /* FIXME: This _really_ needs to die. In 2.5, we should lock the
  125. userspace buffer down and use it directly with readv/writev.
  126. */
  127. #define MAX_KMALLOC_SIZE 0x20000
  128. static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
  129. {
  130. struct mtd_file_info *mfi = file->private_data;
  131. struct mtd_info *mtd = mfi->mtd;
  132. size_t retlen=0;
  133. size_t total_retlen=0;
  134. int ret=0;
  135. int len;
  136. char *kbuf;
  137. DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
  138. if (*ppos + count > mtd->size)
  139. count = mtd->size - *ppos;
  140. if (!count)
  141. return 0;
  142. /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
  143. and pass them directly to the MTD functions */
  144. if (count > MAX_KMALLOC_SIZE)
  145. kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
  146. else
  147. kbuf=kmalloc(count, GFP_KERNEL);
  148. if (!kbuf)
  149. return -ENOMEM;
  150. while (count) {
  151. if (count > MAX_KMALLOC_SIZE)
  152. len = MAX_KMALLOC_SIZE;
  153. else
  154. len = count;
  155. switch (mfi->mode) {
  156. case MTD_MODE_OTP_FACTORY:
  157. ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  158. break;
  159. case MTD_MODE_OTP_USER:
  160. ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  161. break;
  162. case MTD_MODE_RAW:
  163. {
  164. struct mtd_oob_ops ops;
  165. ops.mode = MTD_OOB_RAW;
  166. ops.datbuf = kbuf;
  167. ops.oobbuf = NULL;
  168. ops.len = len;
  169. ret = mtd->read_oob(mtd, *ppos, &ops);
  170. retlen = ops.retlen;
  171. break;
  172. }
  173. default:
  174. ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
  175. }
  176. /* Nand returns -EBADMSG on ecc errors, but it returns
  177. * the data. For our userspace tools it is important
  178. * to dump areas with ecc errors !
  179. * For kernel internal usage it also might return -EUCLEAN
  180. * to signal the caller that a bitflip has occured and has
  181. * been corrected by the ECC algorithm.
  182. * Userspace software which accesses NAND this way
  183. * must be aware of the fact that it deals with NAND
  184. */
  185. if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
  186. *ppos += retlen;
  187. if (copy_to_user(buf, kbuf, retlen)) {
  188. kfree(kbuf);
  189. return -EFAULT;
  190. }
  191. else
  192. total_retlen += retlen;
  193. count -= retlen;
  194. buf += retlen;
  195. if (retlen == 0)
  196. count = 0;
  197. }
  198. else {
  199. kfree(kbuf);
  200. return ret;
  201. }
  202. }
  203. kfree(kbuf);
  204. return total_retlen;
  205. } /* mtd_read */
  206. static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
  207. {
  208. struct mtd_file_info *mfi = file->private_data;
  209. struct mtd_info *mtd = mfi->mtd;
  210. char *kbuf;
  211. size_t retlen;
  212. size_t total_retlen=0;
  213. int ret=0;
  214. int len;
  215. DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
  216. if (*ppos == mtd->size)
  217. return -ENOSPC;
  218. if (*ppos + count > mtd->size)
  219. count = mtd->size - *ppos;
  220. if (!count)
  221. return 0;
  222. if (count > MAX_KMALLOC_SIZE)
  223. kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
  224. else
  225. kbuf=kmalloc(count, GFP_KERNEL);
  226. if (!kbuf)
  227. return -ENOMEM;
  228. while (count) {
  229. if (count > MAX_KMALLOC_SIZE)
  230. len = MAX_KMALLOC_SIZE;
  231. else
  232. len = count;
  233. if (copy_from_user(kbuf, buf, len)) {
  234. kfree(kbuf);
  235. return -EFAULT;
  236. }
  237. switch (mfi->mode) {
  238. case MTD_MODE_OTP_FACTORY:
  239. ret = -EROFS;
  240. break;
  241. case MTD_MODE_OTP_USER:
  242. if (!mtd->write_user_prot_reg) {
  243. ret = -EOPNOTSUPP;
  244. break;
  245. }
  246. ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
  247. break;
  248. case MTD_MODE_RAW:
  249. {
  250. struct mtd_oob_ops ops;
  251. ops.mode = MTD_OOB_RAW;
  252. ops.datbuf = kbuf;
  253. ops.oobbuf = NULL;
  254. ops.len = len;
  255. ret = mtd->write_oob(mtd, *ppos, &ops);
  256. retlen = ops.retlen;
  257. break;
  258. }
  259. default:
  260. ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
  261. }
  262. if (!ret) {
  263. *ppos += retlen;
  264. total_retlen += retlen;
  265. count -= retlen;
  266. buf += retlen;
  267. }
  268. else {
  269. kfree(kbuf);
  270. return ret;
  271. }
  272. }
  273. kfree(kbuf);
  274. return total_retlen;
  275. } /* mtd_write */
  276. /*======================================================================
  277. IOCTL calls for getting device parameters.
  278. ======================================================================*/
/* Erase-completion callback: wake the waiter sleeping in MEMERASE
 * (instr->priv holds the wait queue head set up by mtd_ioctl). */
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}
  283. #ifdef CONFIG_HAVE_MTD_OTP
  284. static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
  285. {
  286. struct mtd_info *mtd = mfi->mtd;
  287. int ret = 0;
  288. switch (mode) {
  289. case MTD_OTP_FACTORY:
  290. if (!mtd->read_fact_prot_reg)
  291. ret = -EOPNOTSUPP;
  292. else
  293. mfi->mode = MTD_MODE_OTP_FACTORY;
  294. break;
  295. case MTD_OTP_USER:
  296. if (!mtd->read_fact_prot_reg)
  297. ret = -EOPNOTSUPP;
  298. else
  299. mfi->mode = MTD_MODE_OTP_USER;
  300. break;
  301. default:
  302. ret = -EINVAL;
  303. case MTD_OTP_OFF:
  304. break;
  305. }
  306. return ret;
  307. }
  308. #else
  309. # define otp_select_filemode(f,m) -EOPNOTSUPP
  310. #endif
/*
 * MEMWRITEOOB helper: write up to 4k of out-of-band (spare area) data
 * at @start.  The OOB payload is copied in from @ptr, and the number of
 * bytes actually written is stored back through @retp.
 */
static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	uint32_t retlen;
	int ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;

	/* Sanity cap on a single OOB transfer. */
	if (length > 4096)
		return -EINVAL;

	if (!mtd->write_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;

	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->oobsize - 1);	/* offset inside one page's OOB area */
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	/* A transfer starting mid-area must not cross the OOB area end. */
	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->oobsize - 1);	/* align down to the page start */
	ret = mtd->write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}
/*
 * MEMREADOOB helper: read up to 4k of out-of-band data starting at
 * @start into a kernel bounce buffer, then copy it out to @ptr.  The
 * number of OOB bytes actually read is stored through @retp.
 */
static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
	uint32_t length, void __user *ptr, uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	int ret = 0;

	/* Sanity cap on a single OOB transfer. */
	if (length > 4096)
		return -EINVAL;

	if (!mtd->read_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_WRITE, ptr,
				length) ? 0 : -EFAULT;
	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->oobsize - 1);	/* offset inside one page's OOB area */
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	/* A transfer starting mid-area must not cross the OOB area end. */
	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->oobsize - 1);	/* align down to the page start */
	ret = mtd->read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}
  380. static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
  381. {
  382. struct mtd_file_info *mfi = file->private_data;
  383. struct mtd_info *mtd = mfi->mtd;
  384. void __user *argp = (void __user *)arg;
  385. int ret = 0;
  386. u_long size;
  387. struct mtd_info_user info;
  388. DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
  389. size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
  390. if (cmd & IOC_IN) {
  391. if (!access_ok(VERIFY_READ, argp, size))
  392. return -EFAULT;
  393. }
  394. if (cmd & IOC_OUT) {
  395. if (!access_ok(VERIFY_WRITE, argp, size))
  396. return -EFAULT;
  397. }
  398. switch (cmd) {
  399. case MEMGETREGIONCOUNT:
  400. if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
  401. return -EFAULT;
  402. break;
  403. case MEMGETREGIONINFO:
  404. {
  405. uint32_t ur_idx;
  406. struct mtd_erase_region_info *kr;
  407. struct region_info_user __user *ur = argp;
  408. if (get_user(ur_idx, &(ur->regionindex)))
  409. return -EFAULT;
  410. kr = &(mtd->eraseregions[ur_idx]);
  411. if (put_user(kr->offset, &(ur->offset))
  412. || put_user(kr->erasesize, &(ur->erasesize))
  413. || put_user(kr->numblocks, &(ur->numblocks)))
  414. return -EFAULT;
  415. break;
  416. }
  417. case MEMGETINFO:
  418. info.type = mtd->type;
  419. info.flags = mtd->flags;
  420. info.size = mtd->size;
  421. info.erasesize = mtd->erasesize;
  422. info.writesize = mtd->writesize;
  423. info.oobsize = mtd->oobsize;
  424. /* The below fields are obsolete */
  425. info.ecctype = -1;
  426. info.eccsize = 0;
  427. if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
  428. return -EFAULT;
  429. break;
  430. case MEMERASE:
  431. case MEMERASE64:
  432. {
  433. struct erase_info *erase;
  434. if(!(file->f_mode & FMODE_WRITE))
  435. return -EPERM;
  436. erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
  437. if (!erase)
  438. ret = -ENOMEM;
  439. else {
  440. wait_queue_head_t waitq;
  441. DECLARE_WAITQUEUE(wait, current);
  442. init_waitqueue_head(&waitq);
  443. if (cmd == MEMERASE64) {
  444. struct erase_info_user64 einfo64;
  445. if (copy_from_user(&einfo64, argp,
  446. sizeof(struct erase_info_user64))) {
  447. kfree(erase);
  448. return -EFAULT;
  449. }
  450. erase->addr = einfo64.start;
  451. erase->len = einfo64.length;
  452. } else {
  453. struct erase_info_user einfo32;
  454. if (copy_from_user(&einfo32, argp,
  455. sizeof(struct erase_info_user))) {
  456. kfree(erase);
  457. return -EFAULT;
  458. }
  459. erase->addr = einfo32.start;
  460. erase->len = einfo32.length;
  461. }
  462. erase->mtd = mtd;
  463. erase->callback = mtdchar_erase_callback;
  464. erase->priv = (unsigned long)&waitq;
  465. /*
  466. FIXME: Allow INTERRUPTIBLE. Which means
  467. not having the wait_queue head on the stack.
  468. If the wq_head is on the stack, and we
  469. leave because we got interrupted, then the
  470. wq_head is no longer there when the
  471. callback routine tries to wake us up.
  472. */
  473. ret = mtd->erase(mtd, erase);
  474. if (!ret) {
  475. set_current_state(TASK_UNINTERRUPTIBLE);
  476. add_wait_queue(&waitq, &wait);
  477. if (erase->state != MTD_ERASE_DONE &&
  478. erase->state != MTD_ERASE_FAILED)
  479. schedule();
  480. remove_wait_queue(&waitq, &wait);
  481. set_current_state(TASK_RUNNING);
  482. ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
  483. }
  484. kfree(erase);
  485. }
  486. break;
  487. }
  488. case MEMWRITEOOB:
  489. {
  490. struct mtd_oob_buf buf;
  491. struct mtd_oob_buf __user *buf_user = argp;
  492. /* NOTE: writes return length to buf_user->length */
  493. if (copy_from_user(&buf, argp, sizeof(buf)))
  494. ret = -EFAULT;
  495. else
  496. ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
  497. buf.ptr, &buf_user->length);
  498. break;
  499. }
  500. case MEMREADOOB:
  501. {
  502. struct mtd_oob_buf buf;
  503. struct mtd_oob_buf __user *buf_user = argp;
  504. /* NOTE: writes return length to buf_user->start */
  505. if (copy_from_user(&buf, argp, sizeof(buf)))
  506. ret = -EFAULT;
  507. else
  508. ret = mtd_do_readoob(mtd, buf.start, buf.length,
  509. buf.ptr, &buf_user->start);
  510. break;
  511. }
  512. case MEMWRITEOOB64:
  513. {
  514. struct mtd_oob_buf64 buf;
  515. struct mtd_oob_buf64 __user *buf_user = argp;
  516. if (copy_from_user(&buf, argp, sizeof(buf)))
  517. ret = -EFAULT;
  518. else
  519. ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
  520. (void __user *)(uintptr_t)buf.usr_ptr,
  521. &buf_user->length);
  522. break;
  523. }
  524. case MEMREADOOB64:
  525. {
  526. struct mtd_oob_buf64 buf;
  527. struct mtd_oob_buf64 __user *buf_user = argp;
  528. if (copy_from_user(&buf, argp, sizeof(buf)))
  529. ret = -EFAULT;
  530. else
  531. ret = mtd_do_readoob(mtd, buf.start, buf.length,
  532. (void __user *)(uintptr_t)buf.usr_ptr,
  533. &buf_user->length);
  534. break;
  535. }
  536. case MEMLOCK:
  537. {
  538. struct erase_info_user einfo;
  539. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  540. return -EFAULT;
  541. if (!mtd->lock)
  542. ret = -EOPNOTSUPP;
  543. else
  544. ret = mtd->lock(mtd, einfo.start, einfo.length);
  545. break;
  546. }
  547. case MEMUNLOCK:
  548. {
  549. struct erase_info_user einfo;
  550. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  551. return -EFAULT;
  552. if (!mtd->unlock)
  553. ret = -EOPNOTSUPP;
  554. else
  555. ret = mtd->unlock(mtd, einfo.start, einfo.length);
  556. break;
  557. }
  558. /* Legacy interface */
  559. case MEMGETOOBSEL:
  560. {
  561. struct nand_oobinfo oi;
  562. if (!mtd->ecclayout)
  563. return -EOPNOTSUPP;
  564. if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
  565. return -EINVAL;
  566. oi.useecc = MTD_NANDECC_AUTOPLACE;
  567. memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
  568. memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
  569. sizeof(oi.oobfree));
  570. oi.eccbytes = mtd->ecclayout->eccbytes;
  571. if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
  572. return -EFAULT;
  573. break;
  574. }
  575. case MEMGETBADBLOCK:
  576. {
  577. loff_t offs;
  578. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  579. return -EFAULT;
  580. if (!mtd->block_isbad)
  581. ret = -EOPNOTSUPP;
  582. else
  583. return mtd->block_isbad(mtd, offs);
  584. break;
  585. }
  586. case MEMSETBADBLOCK:
  587. {
  588. loff_t offs;
  589. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  590. return -EFAULT;
  591. if (!mtd->block_markbad)
  592. ret = -EOPNOTSUPP;
  593. else
  594. return mtd->block_markbad(mtd, offs);
  595. break;
  596. }
  597. #ifdef CONFIG_HAVE_MTD_OTP
  598. case OTPSELECT:
  599. {
  600. int mode;
  601. if (copy_from_user(&mode, argp, sizeof(int)))
  602. return -EFAULT;
  603. mfi->mode = MTD_MODE_NORMAL;
  604. ret = otp_select_filemode(mfi, mode);
  605. file->f_pos = 0;
  606. break;
  607. }
  608. case OTPGETREGIONCOUNT:
  609. case OTPGETREGIONINFO:
  610. {
  611. struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
  612. if (!buf)
  613. return -ENOMEM;
  614. ret = -EOPNOTSUPP;
  615. switch (mfi->mode) {
  616. case MTD_MODE_OTP_FACTORY:
  617. if (mtd->get_fact_prot_info)
  618. ret = mtd->get_fact_prot_info(mtd, buf, 4096);
  619. break;
  620. case MTD_MODE_OTP_USER:
  621. if (mtd->get_user_prot_info)
  622. ret = mtd->get_user_prot_info(mtd, buf, 4096);
  623. break;
  624. default:
  625. break;
  626. }
  627. if (ret >= 0) {
  628. if (cmd == OTPGETREGIONCOUNT) {
  629. int nbr = ret / sizeof(struct otp_info);
  630. ret = copy_to_user(argp, &nbr, sizeof(int));
  631. } else
  632. ret = copy_to_user(argp, buf, ret);
  633. if (ret)
  634. ret = -EFAULT;
  635. }
  636. kfree(buf);
  637. break;
  638. }
  639. case OTPLOCK:
  640. {
  641. struct otp_info oinfo;
  642. if (mfi->mode != MTD_MODE_OTP_USER)
  643. return -EINVAL;
  644. if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
  645. return -EFAULT;
  646. if (!mtd->lock_user_prot_reg)
  647. return -EOPNOTSUPP;
  648. ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
  649. break;
  650. }
  651. #endif
  652. case ECCGETLAYOUT:
  653. {
  654. if (!mtd->ecclayout)
  655. return -EOPNOTSUPP;
  656. if (copy_to_user(argp, mtd->ecclayout,
  657. sizeof(struct nand_ecclayout)))
  658. return -EFAULT;
  659. break;
  660. }
  661. case ECCGETSTATS:
  662. {
  663. if (copy_to_user(argp, &mtd->ecc_stats,
  664. sizeof(struct mtd_ecc_stats)))
  665. return -EFAULT;
  666. break;
  667. }
  668. case MTDFILEMODE:
  669. {
  670. mfi->mode = 0;
  671. switch(arg) {
  672. case MTD_MODE_OTP_FACTORY:
  673. case MTD_MODE_OTP_USER:
  674. ret = otp_select_filemode(mfi, arg);
  675. break;
  676. case MTD_MODE_RAW:
  677. if (!mtd->read_oob || !mtd->write_oob)
  678. return -EOPNOTSUPP;
  679. mfi->mode = arg;
  680. case MTD_MODE_NORMAL:
  681. break;
  682. default:
  683. ret = -EINVAL;
  684. }
  685. file->f_pos = 0;
  686. break;
  687. }
  688. default:
  689. ret = -ENOTTY;
  690. }
  691. return ret;
  692. } /* memory_ioctl */
  693. static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
  694. {
  695. int ret;
  696. lock_kernel();
  697. ret = mtd_ioctl(file, cmd, arg);
  698. unlock_kernel();
  699. return ret;
  700. }
  701. #ifdef CONFIG_COMPAT
/* 32-bit layout of struct mtd_oob_buf, for ioctls from compat tasks. */
struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)

/*
 * compat ioctl entry: translate the 32-bit OOB buffer ioctls and hand
 * everything else to the native handler.
 */
static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	lock_kernel();

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	default:
		ret = mtd_ioctl(file, cmd, (unsigned long)argp);
	}

	unlock_kernel();

	return ret;
}
  749. #endif /* CONFIG_COMPAT */
  750. /*
  751. * try to determine where a shared mapping can be made
  752. * - only supported for NOMMU at the moment (MMU can't doesn't copy private
  753. * mappings)
  754. */
  755. #ifndef CONFIG_MMU
/*
 * NOMMU only: report where the device can be mapped directly.
 * Bounds-checks the request, then delegates to the driver's
 * get_unmapped_area hook; drivers that lack it cannot be mapped.
 */
static unsigned long mtd_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	if (mtd->get_unmapped_area) {
		unsigned long offset;

		/* Caller must not request a fixed address. */
		if (addr != 0)
			return (unsigned long) -EINVAL;

		if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
			return (unsigned long) -EINVAL;

		offset = pgoff << PAGE_SHIFT;
		/* Mapping must fit entirely within the device. */
		if (offset > mtd->size - len)
			return (unsigned long) -EINVAL;

		return mtd->get_unmapped_area(mtd, len, offset, flags);
	}

	/* can't map directly */
	return (unsigned long) -ENOSYS;
}
  778. #endif
  779. /*
  780. * set up a mapping for shared memory segments
  781. */
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	/* With an MMU, only directly-addressable device types (RAM/ROM
	   style) are allowed; flash must go through read/write. */
	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
		return 0;
	return -ENOSYS;
#else
	/* NOMMU: only shared mappings make sense (no private copies). */
	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
#endif
}
/* File operations for the /dev/mtdX character devices. */
static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.unlocked_ioctl	= mtd_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtd_compat_ioctl,
#endif
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};
/*
 * mtd_inodefs: kernel-internal pseudo filesystem that provides one
 * inode per MTD device (see mtd_open), so that all openers of a device
 * share a single backing address_space.
 */
static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
			      const char *dev_name, void *data,
			      struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
			     mnt);
}

static struct file_system_type mtd_inodefs_type = {
	.name = "mtd_inodefs",
	.get_sb = mtd_inodefs_get_sb,
	.kill_sb = kill_anon_super,
};
/* MTD device added: nothing to do, the inode is created lazily on open. */
static void mtdchar_notify_add(struct mtd_info *mtd)
{
}

/* MTD device removed: destroy its mtd_inodefs inode, if one exists. */
static void mtdchar_notify_remove(struct mtd_info *mtd)
{
	struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);

	if (mtd_ino) {
		/* Destroy the inode if it exists */
		mtd_ino->i_nlink = 0;
		iput(mtd_ino);
	}
}

static struct mtd_notifier mtdchar_notifier = {
	.add = mtdchar_notify_add,
	.remove = mtdchar_notify_remove,
};
/*
 * Module init: register the "mtd" character-device major, register and
 * mount the internal mtd_inodefs pseudo filesystem, then subscribe to
 * MTD add/remove notifications.  Unwinds in reverse order on failure.
 */
static int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_notice("Can't allocate major number %d for "
			  "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
		return ret;
	}

	ret = register_filesystem(&mtd_inodefs_type);
	if (ret) {
		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_chdev;
	}

	mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
	if (IS_ERR(mtd_inode_mnt)) {
		ret = PTR_ERR(mtd_inode_mnt);
		pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_filesystem;
	}
	register_mtd_user(&mtdchar_notifier);

	return ret;

err_unregister_filesystem:
	unregister_filesystem(&mtd_inodefs_type);
err_unregister_chdev:
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
	return ret;
}
/* Module exit: tear down everything init_mtdchar() set up, in reverse. */
static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&mtdchar_notifier);
	mntput(mtd_inode_mnt);
	unregister_filesystem(&mtd_inodefs_type);
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
  874. module_init(init_mtdchar);
  875. module_exit(cleanup_mtdchar);
  876. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
  877. MODULE_LICENSE("GPL");
  878. MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
  879. MODULE_DESCRIPTION("Direct character-device access to MTD devices");
  880. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);