/* drivers/mtd/mtdchar.c */
  1. /*
  2. * Character-device access to raw MTD devices.
  3. *
  4. */
  5. #include <linux/device.h>
  6. #include <linux/fs.h>
  7. #include <linux/mm.h>
  8. #include <linux/err.h>
  9. #include <linux/init.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/sched.h>
  14. #include <linux/smp_lock.h>
  15. #include <linux/backing-dev.h>
  16. #include <linux/compat.h>
  17. #include <linux/mount.h>
  18. #include <linux/mtd/mtd.h>
  19. #include <linux/mtd/compatmac.h>
  20. #include <asm/uaccess.h>
  21. #define MTD_INODE_FS_MAGIC 0x11307854
  22. static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;		/* device this open file refers to */
	struct inode *ino;		/* mtd_inodefs inode pinned by mtd_open() */
	enum mtd_file_modes mode;	/* normal / OTP / raw access mode */
};
  32. static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
  33. {
  34. struct mtd_file_info *mfi = file->private_data;
  35. struct mtd_info *mtd = mfi->mtd;
  36. switch (orig) {
  37. case SEEK_SET:
  38. break;
  39. case SEEK_CUR:
  40. offset += file->f_pos;
  41. break;
  42. case SEEK_END:
  43. offset += mtd->size;
  44. break;
  45. default:
  46. return -EINVAL;
  47. }
  48. if (offset >= 0 && offset <= mtd->size)
  49. return file->f_pos = offset;
  50. return -EINVAL;
  51. }
/*
 * Open a /dev/mtdN character device.  Minor numbers come in pairs:
 * even minors are read/write, odd minors are the read-only variants
 * (devnum = minor >> 1).  Takes a reference on the mtd device and on
 * a per-device inode from the internal mtd_inodefs mount; both are
 * released in mtd_close().
 */
static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;	/* two minors (rw/ro) per device */
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;
	struct inode *mtd_ino;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	lock_kernel();
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		put_mtd_device(mtd);
		ret = -ENODEV;
		goto out;
	}

	/* One inode per mtd index on the internal pseudo-fs; its mapping
	 * supplies the device's backing_dev_info to the VM. */
	mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
	if (!mtd_ino) {
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	if (mtd_ino->i_state & I_NEW) {
		mtd_ino->i_private = mtd;
		mtd_ino->i_mode = S_IFCHR;
		mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
		unlock_new_inode(mtd_ino);
	}
	file->f_mapping = mtd_ino->i_mapping;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -EACCES;
		goto out;
	}

	/* Per-open state: device pointer, pinned inode and access mode. */
	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	mfi->ino = mtd_ino;
	mfi->mtd = mtd;
	file->private_data = mfi;

out:
	unlock_kernel();
	return ret;
} /* mtd_open */
  109. /*====================================================================*/
  110. static int mtd_close(struct inode *inode, struct file *file)
  111. {
  112. struct mtd_file_info *mfi = file->private_data;
  113. struct mtd_info *mtd = mfi->mtd;
  114. DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
  115. /* Only sync if opened RW */
  116. if ((file->f_mode & FMODE_WRITE) && mtd->sync)
  117. mtd->sync(mtd);
  118. iput(mfi->ino);
  119. put_mtd_device(mtd);
  120. file->private_data = NULL;
  121. kfree(mfi);
  122. return 0;
  123. } /* mtd_close */
  124. /* FIXME: This _really_ needs to die. In 2.5, we should lock the
  125. userspace buffer down and use it directly with readv/writev.
  126. */
  127. #define MAX_KMALLOC_SIZE 0x20000
/*
 * Read from the device.  Data is bounced through a kernel buffer in
 * chunks of at most MAX_KMALLOC_SIZE bytes; the per-open mode selects
 * between normal reads, OTP-register reads and raw (ECC-less) reads.
 */
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	/* Clamp the request to the end of the device. */
	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {
		/* Per-iteration chunk size. */
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		/* Dispatch on the mode selected via MTDFILEMODE/OTPSELECT. */
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ecc errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ecc errors !
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occured and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			/* No forward progress: stop rather than loop forever. */
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */
/*
 * Write to the device.  Mirrors mtd_read(): user data is copied into a
 * bounce buffer in MAX_KMALLOC_SIZE chunks and dispatched according to
 * the per-open mode (normal, OTP user area, or raw).
 */
static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	/* Writing at end-of-device is a hard error (read returns 0 there). */
	if (*ppos == mtd->size)
		return -ENOSPC;

	/* Clamp the request to the end of the device. */
	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			/* Factory OTP area is read-only by definition. */
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */
  276. /*======================================================================
  277. IOCTL calls for getting device parameters.
  278. ======================================================================*/
  279. static void mtdchar_erase_callback (struct erase_info *instr)
  280. {
  281. wake_up((wait_queue_head_t *)instr->priv);
  282. }
  283. #ifdef CONFIG_HAVE_MTD_OTP
  284. static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
  285. {
  286. struct mtd_info *mtd = mfi->mtd;
  287. int ret = 0;
  288. switch (mode) {
  289. case MTD_OTP_FACTORY:
  290. if (!mtd->read_fact_prot_reg)
  291. ret = -EOPNOTSUPP;
  292. else
  293. mfi->mode = MTD_MODE_OTP_FACTORY;
  294. break;
  295. case MTD_OTP_USER:
  296. if (!mtd->read_fact_prot_reg)
  297. ret = -EOPNOTSUPP;
  298. else
  299. mfi->mode = MTD_MODE_OTP_USER;
  300. break;
  301. default:
  302. ret = -EINVAL;
  303. case MTD_OTP_OFF:
  304. break;
  305. }
  306. return ret;
  307. }
  308. #else
  309. # define otp_select_filemode(f,m) -EOPNOTSUPP
  310. #endif
/*
 * MEMWRITEOOB helper: copy @length OOB bytes from user @ptr and write
 * them into the OOB area of the page containing @start; the number of
 * bytes actually written is stored through user pointer @retp.
 */
static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	uint32_t retlen;
	int ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;

	/* Cap a single request to a sane size. */
	if (length > 4096)
		return -EINVAL;

	if (!mtd->write_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;

	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->oobsize - 1);	/* offset inside the OOB area */
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	/* A request starting mid-OOB must not run past the OOB area. */
	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->oobsize - 1);	/* align down to the page */
	ret = mtd->write_oob(mtd, start, &ops);

	/* oobretlen is size_t; make sure it fits the 32-bit user field. */
	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}
/*
 * MEMREADOOB helper: read @length OOB bytes from the OOB area of the
 * page containing @start into user buffer @ptr; the byte count
 * actually read is stored through user pointer @retp.
 */
static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
	uint32_t length, void __user *ptr, uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	int ret = 0;

	/* Cap a single request to a sane size. */
	if (length > 4096)
		return -EINVAL;

	if (!mtd->read_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_WRITE, ptr,
				length) ? 0 : -EFAULT;
	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->oobsize - 1);	/* offset inside the OOB area */
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	/* A request starting mid-OOB must not run past the OOB area. */
	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->oobsize - 1);	/* align down to the page */
	ret = mtd->read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}
  380. static int mtd_ioctl(struct inode *inode, struct file *file,
  381. u_int cmd, u_long arg)
  382. {
  383. struct mtd_file_info *mfi = file->private_data;
  384. struct mtd_info *mtd = mfi->mtd;
  385. void __user *argp = (void __user *)arg;
  386. int ret = 0;
  387. u_long size;
  388. struct mtd_info_user info;
  389. DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
  390. size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
  391. if (cmd & IOC_IN) {
  392. if (!access_ok(VERIFY_READ, argp, size))
  393. return -EFAULT;
  394. }
  395. if (cmd & IOC_OUT) {
  396. if (!access_ok(VERIFY_WRITE, argp, size))
  397. return -EFAULT;
  398. }
  399. switch (cmd) {
  400. case MEMGETREGIONCOUNT:
  401. if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
  402. return -EFAULT;
  403. break;
  404. case MEMGETREGIONINFO:
  405. {
  406. uint32_t ur_idx;
  407. struct mtd_erase_region_info *kr;
  408. struct region_info_user __user *ur = argp;
  409. if (get_user(ur_idx, &(ur->regionindex)))
  410. return -EFAULT;
  411. kr = &(mtd->eraseregions[ur_idx]);
  412. if (put_user(kr->offset, &(ur->offset))
  413. || put_user(kr->erasesize, &(ur->erasesize))
  414. || put_user(kr->numblocks, &(ur->numblocks)))
  415. return -EFAULT;
  416. break;
  417. }
  418. case MEMGETINFO:
  419. info.type = mtd->type;
  420. info.flags = mtd->flags;
  421. info.size = mtd->size;
  422. info.erasesize = mtd->erasesize;
  423. info.writesize = mtd->writesize;
  424. info.oobsize = mtd->oobsize;
  425. /* The below fields are obsolete */
  426. info.ecctype = -1;
  427. info.eccsize = 0;
  428. if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
  429. return -EFAULT;
  430. break;
  431. case MEMERASE:
  432. case MEMERASE64:
  433. {
  434. struct erase_info *erase;
  435. if(!(file->f_mode & FMODE_WRITE))
  436. return -EPERM;
  437. erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
  438. if (!erase)
  439. ret = -ENOMEM;
  440. else {
  441. wait_queue_head_t waitq;
  442. DECLARE_WAITQUEUE(wait, current);
  443. init_waitqueue_head(&waitq);
  444. if (cmd == MEMERASE64) {
  445. struct erase_info_user64 einfo64;
  446. if (copy_from_user(&einfo64, argp,
  447. sizeof(struct erase_info_user64))) {
  448. kfree(erase);
  449. return -EFAULT;
  450. }
  451. erase->addr = einfo64.start;
  452. erase->len = einfo64.length;
  453. } else {
  454. struct erase_info_user einfo32;
  455. if (copy_from_user(&einfo32, argp,
  456. sizeof(struct erase_info_user))) {
  457. kfree(erase);
  458. return -EFAULT;
  459. }
  460. erase->addr = einfo32.start;
  461. erase->len = einfo32.length;
  462. }
  463. erase->mtd = mtd;
  464. erase->callback = mtdchar_erase_callback;
  465. erase->priv = (unsigned long)&waitq;
  466. /*
  467. FIXME: Allow INTERRUPTIBLE. Which means
  468. not having the wait_queue head on the stack.
  469. If the wq_head is on the stack, and we
  470. leave because we got interrupted, then the
  471. wq_head is no longer there when the
  472. callback routine tries to wake us up.
  473. */
  474. ret = mtd->erase(mtd, erase);
  475. if (!ret) {
  476. set_current_state(TASK_UNINTERRUPTIBLE);
  477. add_wait_queue(&waitq, &wait);
  478. if (erase->state != MTD_ERASE_DONE &&
  479. erase->state != MTD_ERASE_FAILED)
  480. schedule();
  481. remove_wait_queue(&waitq, &wait);
  482. set_current_state(TASK_RUNNING);
  483. ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
  484. }
  485. kfree(erase);
  486. }
  487. break;
  488. }
  489. case MEMWRITEOOB:
  490. {
  491. struct mtd_oob_buf buf;
  492. struct mtd_oob_buf __user *buf_user = argp;
  493. /* NOTE: writes return length to buf_user->length */
  494. if (copy_from_user(&buf, argp, sizeof(buf)))
  495. ret = -EFAULT;
  496. else
  497. ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
  498. buf.ptr, &buf_user->length);
  499. break;
  500. }
  501. case MEMREADOOB:
  502. {
  503. struct mtd_oob_buf buf;
  504. struct mtd_oob_buf __user *buf_user = argp;
  505. /* NOTE: writes return length to buf_user->start */
  506. if (copy_from_user(&buf, argp, sizeof(buf)))
  507. ret = -EFAULT;
  508. else
  509. ret = mtd_do_readoob(mtd, buf.start, buf.length,
  510. buf.ptr, &buf_user->start);
  511. break;
  512. }
  513. case MEMWRITEOOB64:
  514. {
  515. struct mtd_oob_buf64 buf;
  516. struct mtd_oob_buf64 __user *buf_user = argp;
  517. if (copy_from_user(&buf, argp, sizeof(buf)))
  518. ret = -EFAULT;
  519. else
  520. ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
  521. (void __user *)(uintptr_t)buf.usr_ptr,
  522. &buf_user->length);
  523. break;
  524. }
  525. case MEMREADOOB64:
  526. {
  527. struct mtd_oob_buf64 buf;
  528. struct mtd_oob_buf64 __user *buf_user = argp;
  529. if (copy_from_user(&buf, argp, sizeof(buf)))
  530. ret = -EFAULT;
  531. else
  532. ret = mtd_do_readoob(mtd, buf.start, buf.length,
  533. (void __user *)(uintptr_t)buf.usr_ptr,
  534. &buf_user->length);
  535. break;
  536. }
  537. case MEMLOCK:
  538. {
  539. struct erase_info_user einfo;
  540. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  541. return -EFAULT;
  542. if (!mtd->lock)
  543. ret = -EOPNOTSUPP;
  544. else
  545. ret = mtd->lock(mtd, einfo.start, einfo.length);
  546. break;
  547. }
  548. case MEMUNLOCK:
  549. {
  550. struct erase_info_user einfo;
  551. if (copy_from_user(&einfo, argp, sizeof(einfo)))
  552. return -EFAULT;
  553. if (!mtd->unlock)
  554. ret = -EOPNOTSUPP;
  555. else
  556. ret = mtd->unlock(mtd, einfo.start, einfo.length);
  557. break;
  558. }
  559. /* Legacy interface */
  560. case MEMGETOOBSEL:
  561. {
  562. struct nand_oobinfo oi;
  563. if (!mtd->ecclayout)
  564. return -EOPNOTSUPP;
  565. if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
  566. return -EINVAL;
  567. oi.useecc = MTD_NANDECC_AUTOPLACE;
  568. memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
  569. memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
  570. sizeof(oi.oobfree));
  571. oi.eccbytes = mtd->ecclayout->eccbytes;
  572. if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
  573. return -EFAULT;
  574. break;
  575. }
  576. case MEMGETBADBLOCK:
  577. {
  578. loff_t offs;
  579. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  580. return -EFAULT;
  581. if (!mtd->block_isbad)
  582. ret = -EOPNOTSUPP;
  583. else
  584. return mtd->block_isbad(mtd, offs);
  585. break;
  586. }
  587. case MEMSETBADBLOCK:
  588. {
  589. loff_t offs;
  590. if (copy_from_user(&offs, argp, sizeof(loff_t)))
  591. return -EFAULT;
  592. if (!mtd->block_markbad)
  593. ret = -EOPNOTSUPP;
  594. else
  595. return mtd->block_markbad(mtd, offs);
  596. break;
  597. }
  598. #ifdef CONFIG_HAVE_MTD_OTP
  599. case OTPSELECT:
  600. {
  601. int mode;
  602. if (copy_from_user(&mode, argp, sizeof(int)))
  603. return -EFAULT;
  604. mfi->mode = MTD_MODE_NORMAL;
  605. ret = otp_select_filemode(mfi, mode);
  606. file->f_pos = 0;
  607. break;
  608. }
  609. case OTPGETREGIONCOUNT:
  610. case OTPGETREGIONINFO:
  611. {
  612. struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
  613. if (!buf)
  614. return -ENOMEM;
  615. ret = -EOPNOTSUPP;
  616. switch (mfi->mode) {
  617. case MTD_MODE_OTP_FACTORY:
  618. if (mtd->get_fact_prot_info)
  619. ret = mtd->get_fact_prot_info(mtd, buf, 4096);
  620. break;
  621. case MTD_MODE_OTP_USER:
  622. if (mtd->get_user_prot_info)
  623. ret = mtd->get_user_prot_info(mtd, buf, 4096);
  624. break;
  625. default:
  626. break;
  627. }
  628. if (ret >= 0) {
  629. if (cmd == OTPGETREGIONCOUNT) {
  630. int nbr = ret / sizeof(struct otp_info);
  631. ret = copy_to_user(argp, &nbr, sizeof(int));
  632. } else
  633. ret = copy_to_user(argp, buf, ret);
  634. if (ret)
  635. ret = -EFAULT;
  636. }
  637. kfree(buf);
  638. break;
  639. }
  640. case OTPLOCK:
  641. {
  642. struct otp_info oinfo;
  643. if (mfi->mode != MTD_MODE_OTP_USER)
  644. return -EINVAL;
  645. if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
  646. return -EFAULT;
  647. if (!mtd->lock_user_prot_reg)
  648. return -EOPNOTSUPP;
  649. ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
  650. break;
  651. }
  652. #endif
  653. case ECCGETLAYOUT:
  654. {
  655. if (!mtd->ecclayout)
  656. return -EOPNOTSUPP;
  657. if (copy_to_user(argp, mtd->ecclayout,
  658. sizeof(struct nand_ecclayout)))
  659. return -EFAULT;
  660. break;
  661. }
  662. case ECCGETSTATS:
  663. {
  664. if (copy_to_user(argp, &mtd->ecc_stats,
  665. sizeof(struct mtd_ecc_stats)))
  666. return -EFAULT;
  667. break;
  668. }
  669. case MTDFILEMODE:
  670. {
  671. mfi->mode = 0;
  672. switch(arg) {
  673. case MTD_MODE_OTP_FACTORY:
  674. case MTD_MODE_OTP_USER:
  675. ret = otp_select_filemode(mfi, arg);
  676. break;
  677. case MTD_MODE_RAW:
  678. if (!mtd->read_oob || !mtd->write_oob)
  679. return -EOPNOTSUPP;
  680. mfi->mode = arg;
  681. case MTD_MODE_NORMAL:
  682. break;
  683. default:
  684. ret = -EINVAL;
  685. }
  686. file->f_pos = 0;
  687. break;
  688. }
  689. default:
  690. ret = -ENOTTY;
  691. }
  692. return ret;
  693. } /* memory_ioctl */
#ifdef CONFIG_COMPAT

/* 32-bit userland layout of struct mtd_oob_buf. */
struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)

/*
 * compat_ioctl: translate the two OOB ioctls whose structure layout
 * differs for 32-bit callers, and forward everything else to
 * mtd_ioctl() with the pointer argument converted via compat_ptr().
 */
static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	lock_kernel();

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	default:
		ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
	}

	unlock_kernel();

	return ret;
}

#endif /* CONFIG_COMPAT */
/*
 * Try to determine where a shared mapping can be made.
 * Only supported for NOMMU kernels at the moment (an MMU kernel has no
 * way to copy private mappings of the device here).
 */
#ifndef CONFIG_MMU
static unsigned long mtd_get_unmapped_area(struct file *file,
	unsigned long addr,
	unsigned long len,
	unsigned long pgoff,
	unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	if (mtd->get_unmapped_area) {
		unsigned long offset;

		/* A fixed address hint cannot be honoured. */
		if (addr != 0)
			return (unsigned long) -EINVAL;

		/* The mapping must lie entirely within the device. */
		if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
			return (unsigned long) -EINVAL;

		offset = pgoff << PAGE_SHIFT;
		if (offset > mtd->size - len)
			return (unsigned long) -EINVAL;

		return mtd->get_unmapped_area(mtd, len, offset, flags);
	}

	/* can't map directly */
	return (unsigned long) -ENOSYS;
}
#endif
/*
 * Set up a mapping for shared memory segments.
 */
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	/* On MMU systems only RAM/ROM-type devices accept a mapping. */
	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
		return 0;
	return -ENOSYS;
#else
	/* NOMMU: only shared mappings make sense. */
	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
#endif
}
/* File operations for the /dev/mtdN character devices. */
static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtd_compat_ioctl,
#endif
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};
/*
 * Mount helper for the internal "mtd_inodefs" pseudo filesystem that
 * backs the per-device inodes allocated in mtd_open().
 */
static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
	const char *dev_name, void *data,
	struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
			     mnt);
}
/* Internal, kernel-mounted pseudo filesystem (see init_mtdchar()). */
static struct file_system_type mtd_inodefs_type = {
	.name = "mtd_inodefs",
	.get_sb = mtd_inodefs_get_sb,
	.kill_sb = kill_anon_super,
};
/* Nothing to do on device add: the inode is created lazily in mtd_open(). */
static void mtdchar_notify_add(struct mtd_info *mtd)
{
}
/*
 * On device removal, drop the mtd_inodefs inode for this device (if
 * one was ever created by mtd_open()) so it is destroyed rather than
 * kept in the inode cache.
 */
static void mtdchar_notify_remove(struct mtd_info *mtd)
{
	struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);

	if (mtd_ino) {
		/* Destroy the inode if it exists */
		mtd_ino->i_nlink = 0;
		iput(mtd_ino);
	}
}
/* Hooks into the MTD core's add/remove notification chain. */
static struct mtd_notifier mtdchar_notifier = {
	.add = mtdchar_notify_add,
	.remove = mtdchar_notify_remove,
};
/*
 * Module init: claim the MTD character major, register and mount the
 * internal mtd_inodefs pseudo filesystem, then subscribe to MTD
 * add/remove notifications.  Unwinds in reverse order on failure.
 */
static int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_notice("Can't allocate major number %d for "
			  "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
		return ret;
	}

	ret = register_filesystem(&mtd_inodefs_type);
	if (ret) {
		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_chdev;
	}

	mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
	if (IS_ERR(mtd_inode_mnt)) {
		ret = PTR_ERR(mtd_inode_mnt);
		pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_filesystem;
	}
	register_mtd_user(&mtdchar_notifier);

	return ret;

err_unregister_filesystem:
	unregister_filesystem(&mtd_inodefs_type);
err_unregister_chdev:
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
	return ret;
}
/* Module exit: tear down everything init_mtdchar() set up, in reverse. */
static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&mtdchar_notifier);
	mntput(mtd_inode_mnt);
	unregister_filesystem(&mtd_inodefs_type);
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
  868. module_init(init_mtdchar);
  869. module_exit(cleanup_mtdchar);
  870. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
  871. MODULE_LICENSE("GPL");
  872. MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
  873. MODULE_DESCRIPTION("Direct character-device access to MTD devices");
  874. MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);