mtd.h 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540
  1. /*
  2. * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. *
  18. */
  19. #ifndef __MTD_MTD_H__
  20. #define __MTD_MTD_H__
  21. #include <linux/types.h>
  22. #include <linux/uio.h>
  23. #include <linux/notifier.h>
  24. #include <linux/device.h>
  25. #include <mtd/mtd-abi.h>
  26. #include <asm/div64.h>
  27. #define MTD_CHAR_MAJOR 90
  28. #define MTD_BLOCK_MAJOR 31
  29. #define MTD_ERASE_PENDING 0x01
  30. #define MTD_ERASING 0x02
  31. #define MTD_ERASE_SUSPEND 0x04
  32. #define MTD_ERASE_DONE 0x08
  33. #define MTD_ERASE_FAILED 0x10
  34. #define MTD_FAIL_ADDR_UNKNOWN -1LL
  35. /*
  36. * If the erase fails, fail_addr might indicate exactly which block failed. If
  37. * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
  38. * or was not specific to any particular block.
  39. */
/*
 * struct erase_info - describes one erase request passed to mtd_erase().
 *
 * The erase is asynchronous: the driver updates @state and @fail_addr and
 * invokes @callback (if non-NULL) when the operation completes, even on
 * failure.  Fields below the callback are owned by the driver/requester.
 */
struct erase_info {
	struct mtd_info *mtd;		/* device this erase targets */
	uint64_t addr;			/* byte offset of first block to erase */
	uint64_t len;			/* number of bytes to erase */
	uint64_t fail_addr;		/* failing block, or MTD_FAIL_ADDR_UNKNOWN */
	u_long time;			/* driver-private (timing bookkeeping) */
	u_long retries;			/* driver-private retry counter */
	unsigned dev;			/* driver-private chip/device index */
	unsigned cell;			/* driver-private cell index */
	void (*callback) (struct erase_info *self); /* completion notification */
	u_long priv;			/* opaque cookie for the callback owner */
	u_char state;			/* MTD_ERASE_* progress flags */
	struct erase_info *next;	/* link for driver-internal request queues */
};
/*
 * struct mtd_erase_region_info - one region of uniform eraseblock size.
 *
 * Devices with non-uniform eraseblocks expose an array of these via
 * mtd_info->eraseregions.
 */
struct mtd_erase_region_info {
	uint64_t offset;		/* At which this region starts, from the beginning of the MTD */
	uint32_t erasesize;		/* For this region */
	uint32_t numblocks;		/* Number of blocks of erasesize in this region */
	unsigned long *lockmap;		/* If keeping bitmap of locks */
};
/**
 * struct mtd_oob_ops - oob operation operands
 * @mode:	operation mode (MTD_OPS_* constant from mtd-abi.h)
 * @len:	number of data bytes to write/read
 * @retlen:	number of data bytes written/read
 * @ooblen:	number of oob bytes to write/read
 * @oobretlen:	number of oob bytes written/read
 * @ooboffs:	offset of oob data in the oob area (only relevant when
 *		mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
 * @datbuf:	data buffer - if NULL only oob data are read/written
 * @oobbuf:	oob data buffer
 *
 * Note, it is allowed to read more than one OOB area at one go, but not write.
 * The interface assumes that the OOB write requests program only one page's
 * OOB area.
 */
struct mtd_oob_ops {
	unsigned int	mode;
	size_t		len;
	size_t		retlen;
	size_t		ooblen;
	size_t		oobretlen;
	uint32_t	ooboffs;
	uint8_t		*datbuf;
	uint8_t		*oobbuf;
};
  89. #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
  90. #define MTD_MAX_ECCPOS_ENTRIES_LARGE 448
/*
 * Internal ECC layout control structure. For historical reasons, there is a
 * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
 * for export to user-space via the ECCGETLAYOUT ioctl.
 * nand_ecclayout should be expandable in the future simply by the above macros.
 */
struct nand_ecclayout {
	__u32 eccbytes;					/* number of ECC bytes per page */
	__u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];	/* OOB positions holding ECC */
	__u32 oobavail;					/* free OOB bytes per page */
	struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE]; /* free OOB ranges */
};
  103. struct module; /* only needed for owner field in mtd_info */
/*
 * struct mtd_info - the core MTD device descriptor.
 *
 * Filled in by the low-level flash driver and registered with
 * mtd_device_register()/mtd_device_parse_register().  The _xxx() method
 * pointers must never be called directly; use the mtd_*() wrappers below,
 * which handle absent methods and reset the result counters.
 */
struct mtd_info {
	u_char type;			/* MTD_* device type (see mtd-abi.h) */
	uint32_t flags;			/* MTD_* capability flags */
	uint64_t size;			/* Total size of the MTD */

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR
	 * it is of ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;

	uint32_t oobsize;		/* Amount of OOB data per block (e.g. 16) */
	uint32_t oobavail;		/* Available OOB bytes per block */

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	/* Kernel-only stuff starts here. */
	const char *name;		/* device name shown in /proc/mtd etc. */
	int index;			/* device number (mtdN) */

	/* ECC layout structure pointer - read only! */
	struct nand_ecclayout *ecclayout;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, void **virt, resource_size_t *phys);
	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
	unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
					     unsigned long len,
					     unsigned long offset,
					     unsigned long flags);
	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, u_char *buf);
	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
		       size_t *retlen, const u_char *buf);
	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
			     size_t *retlen, const u_char *buf);
	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
				    size_t len);
	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
				    size_t len);
	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
				     size_t len, size_t *retlen, u_char *buf);
	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len);
	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
	void (*_sync) (struct mtd_info *mtd);
	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_suspend) (struct mtd_info *mtd);
	void (*_resume) (struct mtd_info *mtd);
	/*
	 * If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The below functions are only for driver.
	 */
	int (*_get_device) (struct mtd_info *mtd);
	void (*_put_device) (struct mtd_info *mtd);

	/* Backing device capabilities for this device
	 * - provides mmap capabilities
	 */
	struct backing_dev_info *backing_dev_info;

	struct notifier_block reboot_notifier;	/* default mode before reboot */

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;			/* driver-private data */
	struct module *owner;		/* module providing this device */
	struct device dev;		/* embedded device-model node */
	int usecount;			/* reference count (see get/put_mtd_device) */
};
  216. /*
  217. * Erase is an asynchronous operation. Device drivers are supposed
  218. * to call instr->callback() whenever the operation completes, even
  219. * if it completes with a failure.
  220. * Callers are supposed to pass a callback function and wait for it
  221. * to be called before writing to the block.
  222. */
  223. static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
  224. {
  225. return mtd->_erase(mtd, instr);
  226. }
  227. /*
  228. * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
  229. */
  230. static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
  231. size_t *retlen, void **virt, resource_size_t *phys)
  232. {
  233. *retlen = 0;
  234. if (!mtd->_point)
  235. return -EOPNOTSUPP;
  236. return mtd->_point(mtd, from, len, retlen, virt, phys);
  237. }
  238. /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
  239. static inline int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
  240. {
  241. if (!mtd->_point)
  242. return -EOPNOTSUPP;
  243. return mtd->_unpoint(mtd, from, len);
  244. }
  245. /*
  246. * Allow NOMMU mmap() to directly map the device (if not NULL)
  247. * - return the address to which the offset maps
  248. * - return -ENOSYS to indicate refusal to do the mapping
  249. */
  250. static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
  251. unsigned long len,
  252. unsigned long offset,
  253. unsigned long flags)
  254. {
  255. if (!mtd->_get_unmapped_area)
  256. return -EOPNOTSUPP;
  257. return mtd->_get_unmapped_area(mtd, len, offset, flags);
  258. }
  259. static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
  260. size_t *retlen, u_char *buf)
  261. {
  262. return mtd->_read(mtd, from, len, retlen, buf);
  263. }
  264. static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
  265. size_t *retlen, const u_char *buf)
  266. {
  267. *retlen = 0;
  268. if (!mtd->_write)
  269. return -EROFS;
  270. return mtd->_write(mtd, to, len, retlen, buf);
  271. }
  272. /*
  273. * In blackbox flight recorder like scenarios we want to make successful writes
  274. * in interrupt context. panic_write() is only intended to be called when its
  275. * known the kernel is about to panic and we need the write to succeed. Since
  276. * the kernel is not going to be running for much longer, this function can
  277. * break locks and delay to ensure the write succeeds (but not sleep).
  278. */
  279. static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
  280. size_t *retlen, const u_char *buf)
  281. {
  282. *retlen = 0;
  283. if (!mtd->_panic_write)
  284. return -EOPNOTSUPP;
  285. return mtd->_panic_write(mtd, to, len, retlen, buf);
  286. }
  287. static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
  288. struct mtd_oob_ops *ops)
  289. {
  290. ops->retlen = ops->oobretlen = 0;
  291. if (!mtd->_read_oob)
  292. return -EOPNOTSUPP;
  293. return mtd->_read_oob(mtd, from, ops);
  294. }
  295. static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
  296. struct mtd_oob_ops *ops)
  297. {
  298. ops->retlen = ops->oobretlen = 0;
  299. if (!mtd->_write_oob)
  300. return -EOPNOTSUPP;
  301. return mtd->_write_oob(mtd, to, ops);
  302. }
  303. /*
  304. * Method to access the protection register area, present in some flash
  305. * devices. The user data is one time programmable but the factory data is read
  306. * only.
  307. */
  308. static inline int mtd_get_fact_prot_info(struct mtd_info *mtd,
  309. struct otp_info *buf, size_t len)
  310. {
  311. if (!mtd->_get_fact_prot_info)
  312. return -EOPNOTSUPP;
  313. return mtd->_get_fact_prot_info(mtd, buf, len);
  314. }
  315. static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
  316. size_t len, size_t *retlen,
  317. u_char *buf)
  318. {
  319. *retlen = 0;
  320. if (!mtd->_read_fact_prot_reg)
  321. return -EOPNOTSUPP;
  322. return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
  323. }
  324. static inline int mtd_get_user_prot_info(struct mtd_info *mtd,
  325. struct otp_info *buf,
  326. size_t len)
  327. {
  328. if (!mtd->_get_user_prot_info)
  329. return -EOPNOTSUPP;
  330. return mtd->_get_user_prot_info(mtd, buf, len);
  331. }
  332. static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
  333. size_t len, size_t *retlen,
  334. u_char *buf)
  335. {
  336. *retlen = 0;
  337. if (!mtd->_read_user_prot_reg)
  338. return -EOPNOTSUPP;
  339. return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
  340. }
  341. static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to,
  342. size_t len, size_t *retlen,
  343. u_char *buf)
  344. {
  345. *retlen = 0;
  346. if (!mtd->_write_user_prot_reg)
  347. return -EOPNOTSUPP;
  348. return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
  349. }
  350. static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
  351. size_t len)
  352. {
  353. if (!mtd->_lock_user_prot_reg)
  354. return -EOPNOTSUPP;
  355. return mtd->_lock_user_prot_reg(mtd, from, len);
  356. }
  357. int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
  358. unsigned long count, loff_t to, size_t *retlen);
  359. static inline void mtd_sync(struct mtd_info *mtd)
  360. {
  361. if (mtd->_sync)
  362. mtd->_sync(mtd);
  363. }
  364. /* Chip-supported device locking */
  365. static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  366. {
  367. if (!mtd->_lock)
  368. return -EOPNOTSUPP;
  369. return mtd->_lock(mtd, ofs, len);
  370. }
  371. static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  372. {
  373. if (!mtd->_unlock)
  374. return -EOPNOTSUPP;
  375. return mtd->_unlock(mtd, ofs, len);
  376. }
  377. static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  378. {
  379. if (!mtd->_is_locked)
  380. return -EOPNOTSUPP;
  381. return mtd->_is_locked(mtd, ofs, len);
  382. }
  383. static inline int mtd_suspend(struct mtd_info *mtd)
  384. {
  385. return mtd->_suspend ? mtd->_suspend(mtd) : 0;
  386. }
  387. static inline void mtd_resume(struct mtd_info *mtd)
  388. {
  389. if (mtd->_resume)
  390. mtd->_resume(mtd);
  391. }
  392. static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
  393. {
  394. if (!mtd->_block_isbad)
  395. return 0;
  396. return mtd->_block_isbad(mtd, ofs);
  397. }
  398. static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
  399. {
  400. if (!mtd->_block_markbad)
  401. return -EOPNOTSUPP;
  402. return mtd->_block_markbad(mtd, ofs);
  403. }
/*
 * mtd_div_by_eb - divide a 64-bit size by the eraseblock size.
 *
 * Fast path uses the cached shift when erasesize is a power of two.
 * Otherwise do_div() is used; note do_div() divides @sz in place (it is a
 * macro taking an lvalue) and evaluates to the remainder, so the quotient
 * is the updated @sz.
 */
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz >> mtd->erasesize_shift;
	do_div(sz, mtd->erasesize);
	return sz;
}
/*
 * mtd_mod_by_eb - remainder of a 64-bit size modulo the eraseblock size.
 *
 * Uses the cached mask for power-of-two erasesize; otherwise relies on
 * do_div() evaluating to the remainder.
 */
static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz & mtd->erasesize_mask;
	return do_div(sz, mtd->erasesize);
}
/*
 * mtd_div_by_ws - divide a 64-bit size by the writesize (page size).
 *
 * Mirrors mtd_div_by_eb(): shift fast path, do_div() fallback (which
 * updates @sz in place to the quotient).
 */
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz >> mtd->writesize_shift;
	do_div(sz, mtd->writesize);
	return sz;
}
/*
 * mtd_mod_by_ws - remainder of a 64-bit size modulo the writesize.
 *
 * Mirrors mtd_mod_by_eb(): mask fast path, do_div() remainder fallback.
 */
static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz & mtd->writesize_mask;
	return do_div(sz, mtd->writesize);
}
  430. static inline int mtd_has_oob(const struct mtd_info *mtd)
  431. {
  432. return mtd->_read_oob && mtd->_write_oob;
  433. }
  434. static inline int mtd_can_have_bb(const struct mtd_info *mtd)
  435. {
  436. return !!mtd->_block_isbad;
  437. }
  438. /* Kernel-side ioctl definitions */
  439. struct mtd_partition;
  440. struct mtd_part_parser_data;
  441. extern int mtd_device_parse_register(struct mtd_info *mtd,
  442. const char **part_probe_types,
  443. struct mtd_part_parser_data *parser_data,
  444. const struct mtd_partition *defparts,
  445. int defnr_parts);
  446. #define mtd_device_register(master, parts, nr_parts) \
  447. mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
  448. extern int mtd_device_unregister(struct mtd_info *master);
  449. extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
  450. extern int __get_mtd_device(struct mtd_info *mtd);
  451. extern void __put_mtd_device(struct mtd_info *mtd);
  452. extern struct mtd_info *get_mtd_device_nm(const char *name);
  453. extern void put_mtd_device(struct mtd_info *mtd);
/*
 * struct mtd_notifier - callbacks fired on MTD device add/remove.
 *
 * Register with register_mtd_user(); @add and @remove are invoked as
 * devices appear and disappear.  @list links the notifier into the
 * MTD core's internal chain.
 */
struct mtd_notifier {
	void (*add)(struct mtd_info *mtd);	/* device registered */
	void (*remove)(struct mtd_info *mtd);	/* device being unregistered */
	struct list_head list;			/* core-internal chain linkage */
};
  459. extern void register_mtd_user (struct mtd_notifier *new);
  460. extern int unregister_mtd_user (struct mtd_notifier *old);
  461. void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
  462. void mtd_erase_callback(struct erase_info *instr);
  463. static inline int mtd_is_bitflip(int err) {
  464. return err == -EUCLEAN;
  465. }
  466. static inline int mtd_is_eccerr(int err) {
  467. return err == -EBADMSG;
  468. }
  469. static inline int mtd_is_bitflip_or_eccerr(int err) {
  470. return mtd_is_bitflip(err) || mtd_is_eccerr(err);
  471. }
  472. #endif /* __MTD_MTD_H__ */