/* linux/include/linux/mtd/mtd.h */
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#ifndef __MTD_MTD_H__
#define __MTD_MTD_H__

#include <linux/types.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/device.h>

#include <mtd/mtd-abi.h>

#include <asm/div64.h>

/* Device major numbers used by the MTD character and block interfaces */
#define MTD_CHAR_MAJOR 90
#define MTD_BLOCK_MAJOR 31

/* Erase-operation status bit flags (see erase_info.state below) */
#define MTD_ERASE_PENDING 0x01
#define MTD_ERASING 0x02
#define MTD_ERASE_SUSPEND 0x04
#define MTD_ERASE_DONE 0x08
#define MTD_ERASE_FAILED 0x10

/* Sentinel for erase_info.fail_addr: failing address unknown or N/A */
#define MTD_FAIL_ADDR_UNKNOWN -1LL
/*
 * If the erase fails, fail_addr might indicate exactly which block failed. If
 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
 * or was not specific to any particular block.
 */
struct erase_info {
	struct mtd_info *mtd;		/* device being erased */
	uint64_t addr;			/* start of the region to erase */
	uint64_t len;			/* length of the region to erase */
	uint64_t fail_addr;		/* failing block, or MTD_FAIL_ADDR_UNKNOWN */
	u_long time;			/* driver-internal; presumably erase timing -- TODO confirm */
	u_long retries;			/* driver-internal; presumably a retry count -- TODO confirm */
	unsigned dev;			/* driver-internal -- TODO confirm meaning */
	unsigned cell;			/* driver-internal -- TODO confirm meaning */
	/* invoked when the erase completes, even on failure (see mtd_erase()) */
	void (*callback) (struct erase_info *self);
	u_long priv;			/* opaque cookie for the callback's owner */
	u_char state;			/* presumably one of the MTD_ERASE_* states above */
	struct erase_info *next;	/* presumably driver-internal queue linkage -- TODO confirm */
};
/* Describes one run of uniformly-sized erase blocks on a device */
struct mtd_erase_region_info {
	uint64_t offset;	/* At which this region starts, from the beginning of the MTD */
	uint32_t erasesize;	/* For this region */
	uint32_t numblocks;	/* Number of blocks of erasesize in this region */
	unsigned long *lockmap;	/* If keeping bitmap of locks */
};
/**
 * struct mtd_oob_ops - oob operation operands
 * @mode:	operation mode
 * @len:	number of data bytes to write/read
 * @retlen:	number of data bytes written/read
 * @ooblen:	number of oob bytes to write/read
 * @oobretlen:	number of oob bytes written/read
 * @ooboffs:	offset of oob data in the oob area (only relevant when
 *		mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
 * @datbuf:	data buffer - if NULL only oob data are read/written
 * @oobbuf:	oob data buffer
 *
 * Note, it is allowed to read more than one OOB area at one go, but not write.
 * The interface assumes that the OOB write requests program only one page's
 * OOB area.
 */
struct mtd_oob_ops {
	unsigned int mode;
	size_t len;
	size_t retlen;
	size_t ooblen;
	size_t oobretlen;
	uint32_t ooboffs;
	uint8_t *datbuf;
	uint8_t *oobbuf;
};
/* Capacities of the in-kernel ECC layout tables below */
#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
#define MTD_MAX_ECCPOS_ENTRIES_LARGE 448

/*
 * Internal ECC layout control structure. For historical reasons, there is a
 * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
 * for export to user-space via the ECCGETLAYOUT ioctl.
 * nand_ecclayout should be expandable in the future simply by the above macros.
 */
struct nand_ecclayout {
	__u32 eccbytes;		/* presumably total ECC bytes in OOB -- TODO confirm */
	__u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];	/* OOB positions used for ECC */
	__u32 oobavail;		/* OOB bytes not consumed by ECC -- TODO confirm */
	struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];	/* free OOB ranges */
};
struct module;	/* only needed for owner field in mtd_info */

/*
 * Core descriptor for one MTD (memory technology) device.  Drivers fill
 * this in and register it; users go through the mtd_*() inline wrappers
 * below rather than calling the method pointers directly.
 */
struct mtd_info {
	u_char type;		/* device type -- presumably an MTD_* constant from mtd-abi.h */
	uint32_t flags;		/* capability flags -- presumably MTD_* from mtd-abi.h */
	uint64_t size;		/* Total size of the MTD */

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR
	 * it is of ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;

	uint32_t oobsize;	/* Amount of OOB data per block (e.g. 16) */
	uint32_t oobavail;	/* Available OOB bytes per block */

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	/* Kernel-only stuff starts here. */
	const char *name;	/* human-readable device name */
	int index;		/* device number (e.g. the N in /dev/mtdN -- TODO confirm) */

	/* ECC layout structure pointer - read only! */
	struct nand_ecclayout *ecclayout;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*point) (struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, void **virt, resource_size_t *phys);
	void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
	unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags);
	int (*read) (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char *buf);
	int (*write) (struct mtd_info *mtd, loff_t to, size_t len,
		      size_t *retlen, const u_char *buf);
	int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const u_char *buf);
	int (*read_oob) (struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops);
	int (*write_oob) (struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops);
	int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
				   size_t len);
	int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
				   size_t len, size_t *retlen, u_char *buf);
	int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
				   size_t len);
	int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				   size_t len, size_t *retlen, u_char *buf);
	int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, u_char *buf);
	int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				   size_t len);
	int (*writev) (struct mtd_info *mtd, const struct kvec *vecs,
		       unsigned long count, loff_t to, size_t *retlen);
	void (*sync) (struct mtd_info *mtd);
	int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
	int (*suspend) (struct mtd_info *mtd);
	void (*resume) (struct mtd_info *mtd);

	/*
	 * If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The below functions are only for driver.
	 */
	int (*get_device) (struct mtd_info *mtd);
	void (*put_device) (struct mtd_info *mtd);

	/* Backing device capabilities for this device
	 * - provides mmap capabilities
	 */
	struct backing_dev_info *backing_dev_info;

	struct notifier_block reboot_notifier;	/* default mode before reboot */

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;		/* driver-private data */
	struct module *owner;	/* owning module, for refcounting */
	struct device dev;	/* embedded driver-model device */
	int usecount;		/* in-kernel user count -- TODO confirm locking rules */
};
/*
 * Erase is an asynchronous operation. Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */
static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	/* dispatched unconditionally -- unlike mtd_point() et al. there is
	 * no capability check here, so every driver must supply ->erase */
	return mtd->erase(mtd, instr);
}
  227. /*
  228. * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
  229. */
  230. static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
  231. size_t *retlen, void **virt, resource_size_t *phys)
  232. {
  233. *retlen = 0;
  234. if (!mtd->point)
  235. return -EOPNOTSUPP;
  236. return mtd->point(mtd, from, len, retlen, virt, phys);
  237. }
  238. /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
  239. static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
  240. {
  241. return mtd->unpoint(mtd, from, len);
  242. }
  243. /*
  244. * Allow NOMMU mmap() to directly map the device (if not NULL)
  245. * - return the address to which the offset maps
  246. * - return -ENOSYS to indicate refusal to do the mapping
  247. */
  248. static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
  249. unsigned long len,
  250. unsigned long offset,
  251. unsigned long flags)
  252. {
  253. if (!mtd->get_unmapped_area)
  254. return -EOPNOTSUPP;
  255. return mtd->get_unmapped_area(mtd, len, offset, flags);
  256. }
/*
 * Read @len bytes starting at @from into @buf; the driver reports the
 * number of bytes transferred through *retlen.
 */
static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	/* dispatched unconditionally -- no capability check, unlike mtd_write() */
	return mtd->read(mtd, from, len, retlen, buf);
}
  262. static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
  263. size_t *retlen, const u_char *buf)
  264. {
  265. *retlen = 0;
  266. if (!mtd->write)
  267. return -EROFS;
  268. return mtd->write(mtd, to, len, retlen, buf);
  269. }
  270. /*
  271. * In blackbox flight recorder like scenarios we want to make successful writes
  272. * in interrupt context. panic_write() is only intended to be called when its
  273. * known the kernel is about to panic and we need the write to succeed. Since
  274. * the kernel is not going to be running for much longer, this function can
  275. * break locks and delay to ensure the write succeeds (but not sleep).
  276. */
  277. static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
  278. size_t *retlen, const u_char *buf)
  279. {
  280. *retlen = 0;
  281. if (!mtd->panic_write)
  282. return -EOPNOTSUPP;
  283. return mtd->panic_write(mtd, to, len, retlen, buf);
  284. }
  285. static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
  286. struct mtd_oob_ops *ops)
  287. {
  288. ops->retlen = ops->oobretlen = 0;
  289. if (!mtd->read_oob)
  290. return -EOPNOTSUPP;
  291. return mtd->read_oob(mtd, from, ops);
  292. }
  293. static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
  294. struct mtd_oob_ops *ops)
  295. {
  296. ops->retlen = ops->oobretlen = 0;
  297. if (!mtd->write_oob)
  298. return -EOPNOTSUPP;
  299. return mtd->write_oob(mtd, to, ops);
  300. }
  301. /*
  302. * Method to access the protection register area, present in some flash
  303. * devices. The user data is one time programmable but the factory data is read
  304. * only.
  305. */
  306. static inline int mtd_get_fact_prot_info(struct mtd_info *mtd,
  307. struct otp_info *buf, size_t len)
  308. {
  309. if (!mtd->get_fact_prot_info)
  310. return -EOPNOTSUPP;
  311. return mtd->get_fact_prot_info(mtd, buf, len);
  312. }
  313. static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
  314. size_t len, size_t *retlen,
  315. u_char *buf)
  316. {
  317. *retlen = 0;
  318. if (!mtd->read_fact_prot_reg)
  319. return -EOPNOTSUPP;
  320. return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf);
  321. }
  322. static inline int mtd_get_user_prot_info(struct mtd_info *mtd,
  323. struct otp_info *buf,
  324. size_t len)
  325. {
  326. if (!mtd->get_user_prot_info)
  327. return -EOPNOTSUPP;
  328. return mtd->get_user_prot_info(mtd, buf, len);
  329. }
  330. static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
  331. size_t len, size_t *retlen,
  332. u_char *buf)
  333. {
  334. *retlen = 0;
  335. if (!mtd->read_user_prot_reg)
  336. return -EOPNOTSUPP;
  337. return mtd->read_user_prot_reg(mtd, from, len, retlen, buf);
  338. }
  339. static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to,
  340. size_t len, size_t *retlen,
  341. u_char *buf)
  342. {
  343. *retlen = 0;
  344. if (!mtd->write_user_prot_reg)
  345. return -EOPNOTSUPP;
  346. return mtd->write_user_prot_reg(mtd, to, len, retlen, buf);
  347. }
  348. static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
  349. size_t len)
  350. {
  351. if (!mtd->lock_user_prot_reg)
  352. return -EOPNOTSUPP;
  353. return mtd->lock_user_prot_reg(mtd, from, len);
  354. }
/*
 * kvec-based read/write method. NB: The 'count' parameter is the number of
 * _vectors_, each of which contains an (ofs, len) tuple.
 */
static inline int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			     unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	/* dispatched unconditionally -- callers presumably rely on the driver
	 * (or default_mtd_writev below) providing ->writev; TODO confirm */
	return mtd->writev(mtd, vecs, count, to, retlen);
}
/*
 * Driver-defined "sync" operation (presumably waits for pending device
 * activity to finish -- TODO confirm); dispatched unconditionally.
 */
static inline void mtd_sync(struct mtd_info *mtd)
{
	mtd->sync(mtd);
}
/* Chip-supported device locking */
static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	/* lock the range [ofs, ofs + len); dispatched unconditionally */
	return mtd->lock(mtd, ofs, len);
}
/* Undo mtd_lock() for the range [ofs, ofs + len); dispatched unconditionally. */
static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->unlock(mtd, ofs, len);
}
/* Query the chip lock state of the range [ofs, ofs + len); dispatched unconditionally. */
static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return mtd->is_locked(mtd, ofs, len);
}
/* Presumably a power-management hook (paired with mtd_resume); dispatched unconditionally. */
static inline int mtd_suspend(struct mtd_info *mtd)
{
	return mtd->suspend(mtd);
}
/* Counterpart of mtd_suspend(); dispatched unconditionally. */
static inline void mtd_resume(struct mtd_info *mtd)
{
	mtd->resume(mtd);
}
/* Ask the driver whether the block containing @ofs is marked bad; dispatched unconditionally. */
static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	return mtd->block_isbad(mtd, ofs);
}
/* Ask the driver to mark the block containing @ofs as bad; dispatched unconditionally. */
static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	return mtd->block_markbad(mtd, ofs);
}
  398. static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
  399. {
  400. if (mtd->erasesize_shift)
  401. return sz >> mtd->erasesize_shift;
  402. do_div(sz, mtd->erasesize);
  403. return sz;
  404. }
  405. static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
  406. {
  407. if (mtd->erasesize_shift)
  408. return sz & mtd->erasesize_mask;
  409. return do_div(sz, mtd->erasesize);
  410. }
  411. static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
  412. {
  413. if (mtd->writesize_shift)
  414. return sz >> mtd->writesize_shift;
  415. do_div(sz, mtd->writesize);
  416. return sz;
  417. }
  418. static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
  419. {
  420. if (mtd->writesize_shift)
  421. return sz & mtd->writesize_mask;
  422. return do_div(sz, mtd->writesize);
  423. }
  424. static inline int mtd_has_oob(const struct mtd_info *mtd)
  425. {
  426. return mtd->read_oob && mtd->write_oob;
  427. }
/* Kernel-side ioctl definitions */

struct mtd_partition;
struct mtd_part_parser_data;

/*
 * Register @mtd; presumably tries the named partition parsers in
 * @part_probe_types and falls back to @defparts/@defnr_parts -- see the
 * implementation for the exact precedence (TODO confirm).
 */
extern int mtd_device_parse_register(struct mtd_info *mtd,
				     const char **part_probe_types,
				     struct mtd_part_parser_data *parser_data,
				     const struct mtd_partition *defparts,
				     int defnr_parts);

/* Convenience: register without partition parsing. */
#define mtd_device_register(master, parts, nr_parts) \
	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)

extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
/* Callbacks for MTD device arrival/removal; see register_mtd_user() below. */
struct mtd_notifier {
	void (*add)(struct mtd_info *mtd);	/* a new MTD device appeared */
	void (*remove)(struct mtd_info *mtd);	/* an MTD device is going away */
	struct list_head list;			/* linkage on the notifier list */
};
/* Subscribe/unsubscribe a notifier for MTD device add/remove events. */
extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);

/* Generic writev fallback built on the single-buffer write path -- TODO confirm. */
int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
		       unsigned long count, loff_t to, size_t *retlen);

/* Allocate up to *size bytes, shrinking *size on pressure -- TODO confirm contract. */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);

/* Helper for drivers to invoke instr->callback (see mtd_erase() comment). */
void mtd_erase_callback(struct erase_info *instr);
  455. static inline int mtd_is_bitflip(int err) {
  456. return err == -EUCLEAN;
  457. }
  458. static inline int mtd_is_eccerr(int err) {
  459. return err == -EBADMSG;
  460. }
  461. static inline int mtd_is_bitflip_or_eccerr(int err) {
  462. return mtd_is_bitflip(err) || mtd_is_eccerr(err);
  463. }
  464. #endif /* __MTD_MTD_H__ */