cfi.h 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479
  1. /* Common Flash Interface structures
  2. * See http://support.intel.com/design/flash/technote/index.htm
  3. * $Id: cfi.h,v 1.57 2005/11/15 23:28:17 tpoynor Exp $
  4. */
  5. #ifndef __MTD_CFI_H__
  6. #define __MTD_CFI_H__
  7. #include <linux/config.h>
  8. #include <linux/delay.h>
  9. #include <linux/types.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/mtd/flashchip.h>
  12. #include <linux/mtd/map.h>
  13. #include <linux/mtd/cfi_endian.h>
/*
 * Interleave support: a bank may be built from 1, 2, 4 or 8 identical
 * chips wired in parallel; each CONFIG_MTD_CFI_Ix option compiles in
 * support for x-way interleave.
 *
 * When exactly one interleave factor is configured, cfi_interleave()
 * expands to that constant so the compiler can optimise away the other
 * code paths.  When two or more factors are configured, the later
 * block sees cfi_interleave already defined, #undefs it and redefines
 * it to fetch the factor from struct cfi_private at run time.
 */
#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif
  53. static inline int cfi_interleave_supported(int i)
  54. {
  55. switch (i) {
  56. #ifdef CONFIG_MTD_CFI_I1
  57. case 1:
  58. #endif
  59. #ifdef CONFIG_MTD_CFI_I2
  60. case 2:
  61. #endif
  62. #ifdef CONFIG_MTD_CFI_I4
  63. case 4:
  64. #endif
  65. #ifdef CONFIG_MTD_CFI_I8
  66. case 8:
  67. #endif
  68. return 1;
  69. default:
  70. return 0;
  71. }
  72. }
/* NB: these values must represent the number of bytes needed to meet the
 * device type (x8, x16, x32). Eg. a 32 bit device is 4 x 8 bytes.
 * These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8  (8 / 8)	/* 1 byte per device word */
#define CFI_DEVICETYPE_X16 (16 / 8)	/* 2 bytes per device word */
#define CFI_DEVICETYPE_X32 (32 / 8)	/* 4 bytes per device word */
#define CFI_DEVICETYPE_X64 (64 / 8)	/* 8 bytes per device word */
  81. /* NB: We keep these structures in memory in HOST byteorder, except
  82. * where individually noted.
  83. */
  84. /* Basic Query Structure */
  85. struct cfi_ident {
  86. uint8_t qry[3];
  87. uint16_t P_ID;
  88. uint16_t P_ADR;
  89. uint16_t A_ID;
  90. uint16_t A_ADR;
  91. uint8_t VccMin;
  92. uint8_t VccMax;
  93. uint8_t VppMin;
  94. uint8_t VppMax;
  95. uint8_t WordWriteTimeoutTyp;
  96. uint8_t BufWriteTimeoutTyp;
  97. uint8_t BlockEraseTimeoutTyp;
  98. uint8_t ChipEraseTimeoutTyp;
  99. uint8_t WordWriteTimeoutMax;
  100. uint8_t BufWriteTimeoutMax;
  101. uint8_t BlockEraseTimeoutMax;
  102. uint8_t ChipEraseTimeoutMax;
  103. uint8_t DevSize;
  104. uint16_t InterfaceDesc;
  105. uint16_t MaxBufWriteSize;
  106. uint8_t NumEraseRegions;
  107. uint32_t EraseRegionInfo[0]; /* Not host ordered */
  108. } __attribute__((packed));
/* Extended Query Structure for both PRI and ALT */
struct cfi_extquery {
	uint8_t pri[3];		/* "PRI" or "ALT" identification string */
	uint8_t MajorVersion;	/* table format version */
	uint8_t MinorVersion;
} __attribute__((packed));
  115. /* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
  116. struct cfi_pri_intelext {
  117. uint8_t pri[3];
  118. uint8_t MajorVersion;
  119. uint8_t MinorVersion;
  120. uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
  121. block follows - FIXME - not currently supported */
  122. uint8_t SuspendCmdSupport;
  123. uint16_t BlkStatusRegMask;
  124. uint8_t VccOptimal;
  125. uint8_t VppOptimal;
  126. uint8_t NumProtectionFields;
  127. uint16_t ProtRegAddr;
  128. uint8_t FactProtRegSize;
  129. uint8_t UserProtRegSize;
  130. uint8_t extra[0];
  131. } __attribute__((packed));
/* Protection-register (OTP) field description from the Intel
 * extended query table. */
struct cfi_intelext_otpinfo {
	uint32_t ProtRegAddr;		/* base address of the protection register */
	uint16_t FactGroups;		/* number of factory-programmed groups */
	uint8_t FactProtRegSize;	/* size of each factory group */
	uint16_t UserGroups;		/* number of user-programmable groups */
	uint8_t UserProtRegSize;	/* size of each user group */
} __attribute__((packed));
/* Description of one erase-block type (Intel extended query). */
struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;	/* number of identical blocks of this type */
	uint16_t BlockSize;		/* block size (CFI-encoded) */
	uint16_t MinBlockEraseCycles;	/* minimum guaranteed erase cycles */
	uint8_t BitsPerCell;
	uint8_t BlockCap;
} __attribute__((packed));
/* Partition-region description (Intel extended query). */
struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;	/* identical partitions in this region */
	uint8_t NumOpAllowed;		/* simultaneous operations allowed */
	uint8_t NumOpAllowedSimProgMode;
	uint8_t NumOpAllowedSimEraMode;
	uint8_t NumBlockTypes;		/* entries in BlockTypes[] below */
	/* NB: declared [1], but presumably NumBlockTypes entries follow
	 * in the on-chip table — treat as variable length. */
	struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));
/* Programming-region information (Intel extended query). */
struct cfi_intelext_programming_regioninfo {
	uint8_t ProgRegShift;	/* programming region size shift — presumably
				   log2 of the region size; confirm vs. spec */
	uint8_t Reserved1;
	uint8_t ControlValid;
	uint8_t Reserved2;
	uint8_t ControlInvalid;
	uint8_t Reserved3;
} __attribute__((packed));
/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
struct cfi_pri_amdstd {
	uint8_t pri[3];			/* "PRI" identification string */
	uint8_t MajorVersion;
	uint8_t MinorVersion;
	uint8_t SiliconRevision;	/* bits 1-0: Address Sensitive Unlock */
	uint8_t EraseSuspend;		/* erase-suspend capability */
	uint8_t BlkProt;		/* block protection scheme */
	uint8_t TmpBlkUnprotect;	/* temporary block unprotect support */
	uint8_t BlkProtUnprot;		/* block protect/unprotect scheme */
	uint8_t SimultaneousOps;	/* simultaneous operation support */
	uint8_t BurstMode;		/* burst-mode read support */
	uint8_t PageMode;		/* page-mode read support */
	uint8_t VppMin;			/* Vpp supply minimum */
	uint8_t VppMax;			/* Vpp supply maximum */
	uint8_t TopBottom;		/* boot-block location: top or bottom */
} __attribute__((packed));
/* Protection-field query block. */
struct cfi_pri_query {
	uint8_t NumFields;	/* number of entries in ProtField[] */
	uint32_t ProtField[1];	/* Not host ordered */
} __attribute__((packed));
/* Burst-read information query block. */
struct cfi_bri_query {
	uint8_t PageModeReadCap;	/* page-mode read capability */
	uint8_t NumFields;		/* number of entries in ConfField[] */
	uint32_t ConfField[1];		/* Not host ordered */
} __attribute__((packed));
/* Known CFI command-set IDs, as reported in cfi_ident.P_ID / A_ID. */
#define P_ID_NONE               0x0000
#define P_ID_INTEL_EXT          0x0001	/* Intel/Sharp extended */
#define P_ID_AMD_STD            0x0002	/* AMD/Fujitsu standard */
#define P_ID_INTEL_STD          0x0003
#define P_ID_AMD_EXT            0x0004
#define P_ID_WINBOND            0x0006
#define P_ID_ST_ADV             0x0020
#define P_ID_MITSUBISHI_STD     0x0100
#define P_ID_MITSUBISHI_EXT     0x0101
#define P_ID_SST_PAGE           0x0102
#define P_ID_INTEL_PERFORMANCE  0x0200
#define P_ID_INTEL_DATA         0x0210
#define P_ID_RESERVED           0xffff

/* How the chip was identified (cfi_private.cfi_mode). */
#define CFI_MODE_CFI	1	/* genuine CFI query */
#define CFI_MODE_JEDEC	0	/* JEDEC-probed device pretending to be CFI */
/* Driver-private state shared by all chips behind one map/bank. */
struct cfi_private {
	uint16_t cmdset;		/* active command set (a P_ID_* value) */
	void *cmdset_priv;		/* command-set specific private data */
	int interleave;			/* number of chips interleaved on the bus */
	int device_type;		/* CFI_DEVICETYPE_*: chip word width in bytes */
	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
	int addr_unlock1;		/* first unlock-cycle address */
	int addr_unlock2;		/* second unlock-cycle address */
	struct mtd_info *(*cmdset_setup)(struct map_info *);
	struct cfi_ident *cfiq;		/* For now only one. We insist that all devs
					   must be of the same type. */
	int mfr, id;			/* manufacturer and device IDs */
	int numchips;			/* number of entries used in chips[] */
	unsigned long chipshift;	/* Because they're of the same type */
	const char *im_name;		/* inter_module name for cmdset_setup */
	struct flchip chips[0];		/* per-chip data structure for each chip */
};
  220. /*
  221. * Returns the command address according to the given geometry.
  222. */
  223. static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
  224. {
  225. return (cmd_ofs * type) * interleave;
  226. }
/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	/* chip_mode: bytes each individual chip contributes per bus cycle.
	   chips_per_word: how many chips' worth of data fit in one
	   unsigned long. */
	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate.  NB: the case labels
	   deliberately fall through, OR-ing in a shifted copy at each
	   step so the pattern doubles every time. */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
/* Shorthand used by the cmdset drivers; expects 'map' and 'cfi' in scope. */
#define CMD(x)  cfi_build_cmd((x), map, cfi)
/*
 * Fold an interleaved status word down to a single chip's worth of
 * status: the per-chip copies are OR-ed together (so a bit set by any
 * chip shows up in the result), then the result is converted from CFI
 * to host byte order.
 */
static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
		struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;

	/* OR successively smaller right-shifted copies on top of each
	   other so every chip's slice lands in the low bits.  NB: the
	   case labels deliberately fall through. */
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
		/* fall through */
	case 2:
		res |= (onestat >> (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(res);
		break;
	case 4:
		res = cfi32_to_cpu(res);
		break;
	default: BUG();
	}

	return res;
}
/* Shorthand used by the cmdset drivers; expects 'map' and 'cfi' in scope. */
#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command address,
 * before the command was written.
 */
static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	/* Scale the command offset to the bank geometry. */
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);

	/* Replicate the command byte across all interleaved chips. */
	val = cfi_build_cmd(cmd, map, cfi);

	/* Capture the pre-write contents before they are clobbered. */
	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
  361. static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
  362. {
  363. map_word val = map_read(map, addr);
  364. if (map_bankwidth_is_1(map)) {
  365. return val.x[0];
  366. } else if (map_bankwidth_is_2(map)) {
  367. return cfi16_to_cpu(val.x[0]);
  368. } else {
  369. /* No point in a 64-bit byteswap since that would just be
  370. swapping the responses from different chips, and we are
  371. only interested in one chip (a representative sample) */
  372. return cfi32_to_cpu(val.x[0]);
  373. }
  374. }
  375. static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
  376. {
  377. map_word val = map_read(map, addr);
  378. if (map_bankwidth_is_1(map)) {
  379. return val.x[0] & 0xff;
  380. } else if (map_bankwidth_is_2(map)) {
  381. return cfi16_to_cpu(val.x[0]);
  382. } else {
  383. /* No point in a 64-bit byteswap since that would just be
  384. swapping the responses from different chips, and we are
  385. only interested in one chip (a representative sample) */
  386. return cfi32_to_cpu(val.x[0]);
  387. }
  388. }
  389. static inline void cfi_udelay(int us)
  390. {
  391. if (us >= 1000) {
  392. msleep((us+999)/1000);
  393. } else {
  394. udelay(us);
  395. cond_resched();
  396. }
  397. }
/* Read an extended query (PRI/ALT) table of 'size' bytes at flash
 * offset 'adr'; 'name' is used for diagnostics.
 * NOTE(review): the returned buffer is presumably allocated by the
 * callee and owned by the caller — confirm against the implementation. */
struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
			const char* name);

/* One entry in a table of chip-specific quirk fixups, matched by
 * manufacturer and device ID. */
struct cfi_fixup {
	uint16_t mfr;		/* manufacturer ID to match, or CFI_MFR_ANY */
	uint16_t id;		/* device ID to match, or CFI_ID_ANY */
	void (*fixup)(struct mtd_info *mtd, void* param);
	void* param;		/* passed through to fixup() */
};

#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY  0xffff

#define CFI_MFR_AMD 0x0001
#define CFI_MFR_ST  0x0020 	/* STMicroelectronics */

/* Apply every matching entry of the 'fixups' table to 'mtd'. */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);

/* Callback type used by cfi_varsize_frob() for per-chunk operations
 * (e.g. erase/lock) across variable-size erase regions. */
typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk);
  415. #endif /* __MTD_CFI_H__ */