  1. /* Common Flash Interface structures
  2. * See http://support.intel.com/design/flash/technote/index.htm
  3. * $Id: cfi.h,v 1.57 2005/11/15 23:28:17 tpoynor Exp $
  4. */
  5. #ifndef __MTD_CFI_H__
  6. #define __MTD_CFI_H__
  7. #include <linux/delay.h>
  8. #include <linux/types.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/mtd/flashchip.h>
  11. #include <linux/mtd/map.h>
  12. #include <linux/mtd/cfi_endian.h>
  13. #ifdef CONFIG_MTD_CFI_I1
  14. #define cfi_interleave(cfi) 1
  15. #define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
  16. #else
  17. #define cfi_interleave_is_1(cfi) (0)
  18. #endif
  19. #ifdef CONFIG_MTD_CFI_I2
  20. # ifdef cfi_interleave
  21. # undef cfi_interleave
  22. # define cfi_interleave(cfi) ((cfi)->interleave)
  23. # else
  24. # define cfi_interleave(cfi) 2
  25. # endif
  26. #define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
  27. #else
  28. #define cfi_interleave_is_2(cfi) (0)
  29. #endif
  30. #ifdef CONFIG_MTD_CFI_I4
  31. # ifdef cfi_interleave
  32. # undef cfi_interleave
  33. # define cfi_interleave(cfi) ((cfi)->interleave)
  34. # else
  35. # define cfi_interleave(cfi) 4
  36. # endif
  37. #define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
  38. #else
  39. #define cfi_interleave_is_4(cfi) (0)
  40. #endif
  41. #ifdef CONFIG_MTD_CFI_I8
  42. # ifdef cfi_interleave
  43. # undef cfi_interleave
  44. # define cfi_interleave(cfi) ((cfi)->interleave)
  45. # else
  46. # define cfi_interleave(cfi) 8
  47. # endif
  48. #define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
  49. #else
  50. #define cfi_interleave_is_8(cfi) (0)
  51. #endif
  52. static inline int cfi_interleave_supported(int i)
  53. {
  54. switch (i) {
  55. #ifdef CONFIG_MTD_CFI_I1
  56. case 1:
  57. #endif
  58. #ifdef CONFIG_MTD_CFI_I2
  59. case 2:
  60. #endif
  61. #ifdef CONFIG_MTD_CFI_I4
  62. case 4:
  63. #endif
  64. #ifdef CONFIG_MTD_CFI_I8
  65. case 8:
  66. #endif
  67. return 1;
  68. default:
  69. return 0;
  70. }
  71. }
  72. /* NB: these values must represents the number of bytes needed to meet the
  73. * device type (x8, x16, x32). Eg. a 32 bit device is 4 x 8 bytes.
  74. * These numbers are used in calculations.
  75. */
  76. #define CFI_DEVICETYPE_X8 (8 / 8)
  77. #define CFI_DEVICETYPE_X16 (16 / 8)
  78. #define CFI_DEVICETYPE_X32 (32 / 8)
  79. #define CFI_DEVICETYPE_X64 (64 / 8)
  80. /* NB: We keep these structures in memory in HOST byteorder, except
  81. * where individually noted.
  82. */
  83. /* Basic Query Structure */
  84. struct cfi_ident {
  85. uint8_t qry[3];
  86. uint16_t P_ID;
  87. uint16_t P_ADR;
  88. uint16_t A_ID;
  89. uint16_t A_ADR;
  90. uint8_t VccMin;
  91. uint8_t VccMax;
  92. uint8_t VppMin;
  93. uint8_t VppMax;
  94. uint8_t WordWriteTimeoutTyp;
  95. uint8_t BufWriteTimeoutTyp;
  96. uint8_t BlockEraseTimeoutTyp;
  97. uint8_t ChipEraseTimeoutTyp;
  98. uint8_t WordWriteTimeoutMax;
  99. uint8_t BufWriteTimeoutMax;
  100. uint8_t BlockEraseTimeoutMax;
  101. uint8_t ChipEraseTimeoutMax;
  102. uint8_t DevSize;
  103. uint16_t InterfaceDesc;
  104. uint16_t MaxBufWriteSize;
  105. uint8_t NumEraseRegions;
  106. uint32_t EraseRegionInfo[0]; /* Not host ordered */
  107. } __attribute__((packed));
  108. /* Extended Query Structure for both PRI and ALT */
  109. struct cfi_extquery {
  110. uint8_t pri[3];
  111. uint8_t MajorVersion;
  112. uint8_t MinorVersion;
  113. } __attribute__((packed));
  114. /* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
  115. struct cfi_pri_intelext {
  116. uint8_t pri[3];
  117. uint8_t MajorVersion;
  118. uint8_t MinorVersion;
  119. uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
  120. block follows - FIXME - not currently supported */
  121. uint8_t SuspendCmdSupport;
  122. uint16_t BlkStatusRegMask;
  123. uint8_t VccOptimal;
  124. uint8_t VppOptimal;
  125. uint8_t NumProtectionFields;
  126. uint16_t ProtRegAddr;
  127. uint8_t FactProtRegSize;
  128. uint8_t UserProtRegSize;
  129. uint8_t extra[0];
  130. } __attribute__((packed));
  131. struct cfi_intelext_otpinfo {
  132. uint32_t ProtRegAddr;
  133. uint16_t FactGroups;
  134. uint8_t FactProtRegSize;
  135. uint16_t UserGroups;
  136. uint8_t UserProtRegSize;
  137. } __attribute__((packed));
  138. struct cfi_intelext_blockinfo {
  139. uint16_t NumIdentBlocks;
  140. uint16_t BlockSize;
  141. uint16_t MinBlockEraseCycles;
  142. uint8_t BitsPerCell;
  143. uint8_t BlockCap;
  144. } __attribute__((packed));
  145. struct cfi_intelext_regioninfo {
  146. uint16_t NumIdentPartitions;
  147. uint8_t NumOpAllowed;
  148. uint8_t NumOpAllowedSimProgMode;
  149. uint8_t NumOpAllowedSimEraMode;
  150. uint8_t NumBlockTypes;
  151. struct cfi_intelext_blockinfo BlockTypes[1];
  152. } __attribute__((packed));
  153. struct cfi_intelext_programming_regioninfo {
  154. uint8_t ProgRegShift;
  155. uint8_t Reserved1;
  156. uint8_t ControlValid;
  157. uint8_t Reserved2;
  158. uint8_t ControlInvalid;
  159. uint8_t Reserved3;
  160. } __attribute__((packed));
  161. /* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
  162. struct cfi_pri_amdstd {
  163. uint8_t pri[3];
  164. uint8_t MajorVersion;
  165. uint8_t MinorVersion;
  166. uint8_t SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
  167. uint8_t EraseSuspend;
  168. uint8_t BlkProt;
  169. uint8_t TmpBlkUnprotect;
  170. uint8_t BlkProtUnprot;
  171. uint8_t SimultaneousOps;
  172. uint8_t BurstMode;
  173. uint8_t PageMode;
  174. uint8_t VppMin;
  175. uint8_t VppMax;
  176. uint8_t TopBottom;
  177. } __attribute__((packed));
  178. struct cfi_pri_query {
  179. uint8_t NumFields;
  180. uint32_t ProtField[1]; /* Not host ordered */
  181. } __attribute__((packed));
  182. struct cfi_bri_query {
  183. uint8_t PageModeReadCap;
  184. uint8_t NumFields;
  185. uint32_t ConfField[1]; /* Not host ordered */
  186. } __attribute__((packed));
  187. #define P_ID_NONE 0x0000
  188. #define P_ID_INTEL_EXT 0x0001
  189. #define P_ID_AMD_STD 0x0002
  190. #define P_ID_INTEL_STD 0x0003
  191. #define P_ID_AMD_EXT 0x0004
  192. #define P_ID_WINBOND 0x0006
  193. #define P_ID_ST_ADV 0x0020
  194. #define P_ID_MITSUBISHI_STD 0x0100
  195. #define P_ID_MITSUBISHI_EXT 0x0101
  196. #define P_ID_SST_PAGE 0x0102
  197. #define P_ID_INTEL_PERFORMANCE 0x0200
  198. #define P_ID_INTEL_DATA 0x0210
  199. #define P_ID_RESERVED 0xffff
  200. #define CFI_MODE_CFI 1
  201. #define CFI_MODE_JEDEC 0
  202. struct cfi_private {
  203. uint16_t cmdset;
  204. void *cmdset_priv;
  205. int interleave;
  206. int device_type;
  207. int cfi_mode; /* Are we a JEDEC device pretending to be CFI? */
  208. int addr_unlock1;
  209. int addr_unlock2;
  210. struct mtd_info *(*cmdset_setup)(struct map_info *);
  211. struct cfi_ident *cfiq; /* For now only one. We insist that all devs
  212. must be of the same type. */
  213. int mfr, id;
  214. int numchips;
  215. unsigned long chipshift; /* Because they're of the same type */
  216. const char *im_name; /* inter_module name for cmdset_setup */
  217. struct flchip chips[0]; /* per-chip data structure for each chip */
  218. };
  219. /*
  220. * Returns the command address according to the given geometry.
  221. */
  222. static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
  223. {
  224. return (cmd_ofs * type) * interleave;
  225. }
  226. /*
  227. * Transforms the CFI command for the given geometry (bus width & interleave).
  228. * It looks too long to be inline, but in the common case it should almost all
  229. * get optimised away.
  230. */
  231. static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
  232. {
  233. map_word val = { {0} };
  234. int wordwidth, words_per_bus, chip_mode, chips_per_word;
  235. unsigned long onecmd;
  236. int i;
  237. /* We do it this way to give the compiler a fighting chance
  238. of optimising away all the crap for 'bankwidth' larger than
  239. an unsigned long, in the common case where that support is
  240. disabled */
  241. if (map_bankwidth_is_large(map)) {
  242. wordwidth = sizeof(unsigned long);
  243. words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
  244. } else {
  245. wordwidth = map_bankwidth(map);
  246. words_per_bus = 1;
  247. }
  248. chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
  249. chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
  250. /* First, determine what the bit-pattern should be for a single
  251. device, according to chip mode and endianness... */
  252. switch (chip_mode) {
  253. default: BUG();
  254. case 1:
  255. onecmd = cmd;
  256. break;
  257. case 2:
  258. onecmd = cpu_to_cfi16(cmd);
  259. break;
  260. case 4:
  261. onecmd = cpu_to_cfi32(cmd);
  262. break;
  263. }
  264. /* Now replicate it across the size of an unsigned long, or
  265. just to the bus width as appropriate */
  266. switch (chips_per_word) {
  267. default: BUG();
  268. #if BITS_PER_LONG >= 64
  269. case 8:
  270. onecmd |= (onecmd << (chip_mode * 32));
  271. #endif
  272. case 4:
  273. onecmd |= (onecmd << (chip_mode * 16));
  274. case 2:
  275. onecmd |= (onecmd << (chip_mode * 8));
  276. case 1:
  277. ;
  278. }
  279. /* And finally, for the multi-word case, replicate it
  280. in all words in the structure */
  281. for (i=0; i < words_per_bus; i++) {
  282. val.x[i] = onecmd;
  283. }
  284. return val;
  285. }
  286. #define CMD(x) cfi_build_cmd((x), map, cfi)
  287. static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
  288. struct cfi_private *cfi)
  289. {
  290. int wordwidth, words_per_bus, chip_mode, chips_per_word;
  291. unsigned long onestat, res = 0;
  292. int i;
  293. /* We do it this way to give the compiler a fighting chance
  294. of optimising away all the crap for 'bankwidth' larger than
  295. an unsigned long, in the common case where that support is
  296. disabled */
  297. if (map_bankwidth_is_large(map)) {
  298. wordwidth = sizeof(unsigned long);
  299. words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
  300. } else {
  301. wordwidth = map_bankwidth(map);
  302. words_per_bus = 1;
  303. }
  304. chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
  305. chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
  306. onestat = val.x[0];
  307. /* Or all status words together */
  308. for (i=1; i < words_per_bus; i++) {
  309. onestat |= val.x[i];
  310. }
  311. res = onestat;
  312. switch(chips_per_word) {
  313. default: BUG();
  314. #if BITS_PER_LONG >= 64
  315. case 8:
  316. res |= (onestat >> (chip_mode * 32));
  317. #endif
  318. case 4:
  319. res |= (onestat >> (chip_mode * 16));
  320. case 2:
  321. res |= (onestat >> (chip_mode * 8));
  322. case 1:
  323. ;
  324. }
  325. /* Last, determine what the bit-pattern should be for a single
  326. device, according to chip mode and endianness... */
  327. switch (chip_mode) {
  328. case 1:
  329. break;
  330. case 2:
  331. res = cfi16_to_cpu(res);
  332. break;
  333. case 4:
  334. res = cfi32_to_cpu(res);
  335. break;
  336. default: BUG();
  337. }
  338. return res;
  339. }
  340. #define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
  341. /*
  342. * Sends a CFI command to a bank of flash for the given geometry.
  343. *
  344. * Returns the offset in flash where the command was written.
  345. * If prev_val is non-null, it will be set to the value at the command address,
  346. * before the command was written.
  347. */
  348. static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
  349. struct map_info *map, struct cfi_private *cfi,
  350. int type, map_word *prev_val)
  351. {
  352. map_word val;
  353. uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);
  354. val = cfi_build_cmd(cmd, map, cfi);
  355. if (prev_val)
  356. *prev_val = map_read(map, addr);
  357. map_write(map, val, addr);
  358. return addr - base;
  359. }
  360. static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
  361. {
  362. map_word val = map_read(map, addr);
  363. if (map_bankwidth_is_1(map)) {
  364. return val.x[0];
  365. } else if (map_bankwidth_is_2(map)) {
  366. return cfi16_to_cpu(val.x[0]);
  367. } else {
  368. /* No point in a 64-bit byteswap since that would just be
  369. swapping the responses from different chips, and we are
  370. only interested in one chip (a representative sample) */
  371. return cfi32_to_cpu(val.x[0]);
  372. }
  373. }
  374. static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
  375. {
  376. map_word val = map_read(map, addr);
  377. if (map_bankwidth_is_1(map)) {
  378. return val.x[0] & 0xff;
  379. } else if (map_bankwidth_is_2(map)) {
  380. return cfi16_to_cpu(val.x[0]);
  381. } else {
  382. /* No point in a 64-bit byteswap since that would just be
  383. swapping the responses from different chips, and we are
  384. only interested in one chip (a representative sample) */
  385. return cfi32_to_cpu(val.x[0]);
  386. }
  387. }
  388. static inline void cfi_udelay(int us)
  389. {
  390. if (us >= 1000) {
  391. msleep((us+999)/1000);
  392. } else {
  393. udelay(us);
  394. cond_resched();
  395. }
  396. }
  397. struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
  398. const char* name);
  399. struct cfi_fixup {
  400. uint16_t mfr;
  401. uint16_t id;
  402. void (*fixup)(struct mtd_info *mtd, void* param);
  403. void* param;
  404. };
  405. #define CFI_MFR_ANY 0xffff
  406. #define CFI_ID_ANY 0xffff
  407. #define CFI_MFR_AMD 0x0001
  408. #define CFI_MFR_ST 0x0020 /* STMicroelectronics */
  409. void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
  410. typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
  411. unsigned long adr, int len, void *thunk);
  412. int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
  413. loff_t ofs, size_t len, void *thunk);
  414. #endif /* __MTD_CFI_H__ */