/* cfi.h */
  1. /* Common Flash Interface structures
  2. * See http://support.intel.com/design/flash/technote/index.htm
  3. * $Id: cfi.h,v 1.57 2005/11/15 23:28:17 tpoynor Exp $
  4. */
  5. #ifndef __MTD_CFI_H__
  6. #define __MTD_CFI_H__
  7. #include <linux/delay.h>
  8. #include <linux/types.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/mtd/flashchip.h>
  11. #include <linux/mtd/map.h>
  12. #include <linux/mtd/cfi_endian.h>
/*
 * Select the cfi_interleave() implementation from the set of interleave
 * widths compiled in (CONFIG_MTD_CFI_I{1,2,4,8}):
 *  - if exactly one width is enabled, cfi_interleave() collapses to that
 *    constant, so the compiler can discard the other code paths;
 *  - if more than one width is enabled, the earlier constant definition is
 *    #undef'd and cfi_interleave() reads the run-time value from
 *    struct cfi_private;
 *  - each disabled width makes its cfi_interleave_is_N() a constant 0.
 */
#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
/* a narrower width already defined it -> must be chosen at run time */
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
/* a narrower width already defined it -> must be chosen at run time */
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
/* a narrower width already defined it -> must be chosen at run time */
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif
  52. static inline int cfi_interleave_supported(int i)
  53. {
  54. switch (i) {
  55. #ifdef CONFIG_MTD_CFI_I1
  56. case 1:
  57. #endif
  58. #ifdef CONFIG_MTD_CFI_I2
  59. case 2:
  60. #endif
  61. #ifdef CONFIG_MTD_CFI_I4
  62. case 4:
  63. #endif
  64. #ifdef CONFIG_MTD_CFI_I8
  65. case 8:
  66. #endif
  67. return 1;
  68. default:
  69. return 0;
  70. }
  71. }
/* NB: these values must represent the number of bytes needed to meet the
 * device type (x8, x16, x32).  E.g. a 32-bit (x32) device is 4 bytes wide
 * (32 bits = 4 x 8 bits).  These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8 (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)
  80. /* NB: We keep these structures in memory in HOST byteorder, except
  81. * where individually noted.
  82. */
  83. /* Basic Query Structure */
  84. struct cfi_ident {
  85. uint8_t qry[3];
  86. uint16_t P_ID;
  87. uint16_t P_ADR;
  88. uint16_t A_ID;
  89. uint16_t A_ADR;
  90. uint8_t VccMin;
  91. uint8_t VccMax;
  92. uint8_t VppMin;
  93. uint8_t VppMax;
  94. uint8_t WordWriteTimeoutTyp;
  95. uint8_t BufWriteTimeoutTyp;
  96. uint8_t BlockEraseTimeoutTyp;
  97. uint8_t ChipEraseTimeoutTyp;
  98. uint8_t WordWriteTimeoutMax;
  99. uint8_t BufWriteTimeoutMax;
  100. uint8_t BlockEraseTimeoutMax;
  101. uint8_t ChipEraseTimeoutMax;
  102. uint8_t DevSize;
  103. uint16_t InterfaceDesc;
  104. uint16_t MaxBufWriteSize;
  105. uint8_t NumEraseRegions;
  106. uint32_t EraseRegionInfo[0]; /* Not host ordered */
  107. } __attribute__((packed));
/* Extended Query Structure for both PRI and ALT vendor tables;
 * common 5-byte header (signature + version) shared by all vendors. */
struct cfi_extquery {
	uint8_t pri[3];		/* table signature ("PRI" or "ALT") */
	uint8_t MajorVersion;
	uint8_t MinorVersion;
} __attribute__((packed));
  114. /* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
  115. struct cfi_pri_intelext {
  116. uint8_t pri[3];
  117. uint8_t MajorVersion;
  118. uint8_t MinorVersion;
  119. uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
  120. block follows - FIXME - not currently supported */
  121. uint8_t SuspendCmdSupport;
  122. uint16_t BlkStatusRegMask;
  123. uint8_t VccOptimal;
  124. uint8_t VppOptimal;
  125. uint8_t NumProtectionFields;
  126. uint16_t ProtRegAddr;
  127. uint8_t FactProtRegSize;
  128. uint8_t UserProtRegSize;
  129. uint8_t extra[0];
  130. } __attribute__((packed));
/* Intel extended command set: one-time-programmable (OTP) protection
 * register description, as read from the chip (packed to match). */
struct cfi_intelext_otpinfo {
	uint32_t ProtRegAddr;		/* protection register base address */
	uint16_t FactGroups;		/* factory-programmed group count */
	uint8_t FactProtRegSize;
	uint16_t UserGroups;		/* user-programmable group count */
	uint8_t UserProtRegSize;
} __attribute__((packed));
/* Intel extended command set: geometry of one erase-block type. */
struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;	/* count of identical blocks of this type */
	uint16_t BlockSize;
	uint16_t MinBlockEraseCycles;
	uint8_t BitsPerCell;
	uint8_t BlockCap;
} __attribute__((packed));
/* Intel extended command set: per-region partition information. */
struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;
	uint8_t NumOpAllowed;
	uint8_t NumOpAllowedSimProgMode;
	uint8_t NumOpAllowedSimEraMode;
	uint8_t NumBlockTypes;
	/* Variable-length tail: NumBlockTypes entries actually follow.
	 * Declared [1] (not [0]/[]) -- NOTE(review): sizing code presumably
	 * relies on this; confirm before converting to a flexible array. */
	struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));
/* Intel extended command set: programming-region information.
 * ReservedN fields are on-chip table padding, kept so the struct
 * mirrors the table layout. */
struct cfi_intelext_programming_regioninfo {
	uint8_t ProgRegShift;
	uint8_t Reserved1;
	uint8_t ControlValid;
	uint8_t Reserved2;
	uint8_t ControlInvalid;
	uint8_t Reserved3;
} __attribute__((packed));
/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002).
 * Mirrors the on-chip extended query table, hence packed. */
struct cfi_pri_amdstd {
	uint8_t pri[3];			/* extended table signature */
	uint8_t MajorVersion;
	uint8_t MinorVersion;
	uint8_t SiliconRevision;	/* bits 1-0: Address Sensitive Unlock */
	uint8_t EraseSuspend;
	uint8_t BlkProt;
	uint8_t TmpBlkUnprotect;
	uint8_t BlkProtUnprot;
	uint8_t SimultaneousOps;
	uint8_t BurstMode;
	uint8_t PageMode;
	uint8_t VppMin;
	uint8_t VppMax;
	uint8_t TopBottom;		/* presumably top-/bottom-boot layout --
					   confirm against the AMD CFI spec */
} __attribute__((packed));
/* Vendor-Specific PRI for Atmel chips (command set 0x0002).
 * Mirrors the on-chip extended query table, hence packed. */
struct cfi_pri_atmel {
	uint8_t pri[3];		/* extended table signature */
	uint8_t MajorVersion;
	uint8_t MinorVersion;
	uint8_t Features;
	uint8_t BottomBoot;
	uint8_t BurstMode;
	uint8_t PageMode;
} __attribute__((packed));
/* Protection-field query block.  [1] is a variable-length tail:
 * NumFields entries actually follow. */
struct cfi_pri_query {
	uint8_t NumFields;
	uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));
/* Burst-read configuration query block.  [1] is a variable-length tail:
 * NumFields entries actually follow. */
struct cfi_bri_query {
	uint8_t PageModeReadCap;
	uint8_t NumFields;
	uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));
/* Command-set (vendor) IDs as found in the P_ID/A_ID fields of
 * struct cfi_ident. */
#define P_ID_NONE               0x0000
#define P_ID_INTEL_EXT          0x0001
#define P_ID_AMD_STD            0x0002
#define P_ID_INTEL_STD          0x0003
#define P_ID_AMD_EXT            0x0004
#define P_ID_WINBOND            0x0006
#define P_ID_ST_ADV             0x0020
#define P_ID_MITSUBISHI_STD     0x0100
#define P_ID_MITSUBISHI_EXT     0x0101
#define P_ID_SST_PAGE           0x0102
#define P_ID_INTEL_PERFORMANCE  0x0200
#define P_ID_INTEL_DATA         0x0210
#define P_ID_RESERVED           0xffff

/* Probe mode recorded in cfi_private.cfi_mode: a genuine CFI query,
 * or a JEDEC-ID device pretending to be CFI. */
#define CFI_MODE_CFI	1
#define CFI_MODE_JEDEC	0
  212. struct cfi_private {
  213. uint16_t cmdset;
  214. void *cmdset_priv;
  215. int interleave;
  216. int device_type;
  217. int cfi_mode; /* Are we a JEDEC device pretending to be CFI? */
  218. int addr_unlock1;
  219. int addr_unlock2;
  220. struct mtd_info *(*cmdset_setup)(struct map_info *);
  221. struct cfi_ident *cfiq; /* For now only one. We insist that all devs
  222. must be of the same type. */
  223. int mfr, id;
  224. int numchips;
  225. unsigned long chipshift; /* Because they're of the same type */
  226. const char *im_name; /* inter_module name for cmdset_setup */
  227. struct flchip chips[0]; /* per-chip data structure for each chip */
  228. };
  229. /*
  230. * Returns the command address according to the given geometry.
  231. */
  232. static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
  233. {
  234. return (cmd_ofs * type) * interleave;
  235. }
/*
 * Transforms the CFI command for the given geometry (bus width & interleave):
 * builds the bus word that places 'cmd' on every interleaved chip at once,
 * byte-swapped per chip as the CFI endianness requires.
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	/* chip_mode = bytes per individual device word (x8 -> 1, x16 -> 2, x32 -> 4) */
	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate.  NB: each case doubles
	   the pattern and deliberately falls through to the next. */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
/* Shorthand for use inside cmdset drivers where 'map' and 'cfi' are in scope. */
#define CMD(x) cfi_build_cmd((x), map, cfi)
/*
 * Inverse companion of cfi_build_cmd(): OR together the per-chip status
 * lanes of a bus word read from interleaved chips, folding them down to
 * a single device-width status value in host byteorder.
 */
static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
		struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	/* chip_mode = bytes per individual device word (cf. cfi_build_cmd) */
	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;
	/* Fold the interleaved chip lanes on top of each other.
	   NB: each case deliberately falls through to the next. */
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
		/* fall through */
	case 2:
		res |= (onestat >> (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(res);
		break;
	case 4:
		res = cfi32_to_cpu(res);
		break;
	default: BUG();
	}

	return res;
}
/* Shorthand for use inside cmdset drivers where 'map' and 'cfi' are in scope. */
#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
  351. /*
  352. * Sends a CFI command to a bank of flash for the given geometry.
  353. *
  354. * Returns the offset in flash where the command was written.
  355. * If prev_val is non-null, it will be set to the value at the command address,
  356. * before the command was written.
  357. */
  358. static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
  359. struct map_info *map, struct cfi_private *cfi,
  360. int type, map_word *prev_val)
  361. {
  362. map_word val;
  363. uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);
  364. val = cfi_build_cmd(cmd, map, cfi);
  365. if (prev_val)
  366. *prev_val = map_read(map, addr);
  367. map_write(map, val, addr);
  368. return addr - base;
  369. }
  370. static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
  371. {
  372. map_word val = map_read(map, addr);
  373. if (map_bankwidth_is_1(map)) {
  374. return val.x[0];
  375. } else if (map_bankwidth_is_2(map)) {
  376. return cfi16_to_cpu(val.x[0]);
  377. } else {
  378. /* No point in a 64-bit byteswap since that would just be
  379. swapping the responses from different chips, and we are
  380. only interested in one chip (a representative sample) */
  381. return cfi32_to_cpu(val.x[0]);
  382. }
  383. }
  384. static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
  385. {
  386. map_word val = map_read(map, addr);
  387. if (map_bankwidth_is_1(map)) {
  388. return val.x[0] & 0xff;
  389. } else if (map_bankwidth_is_2(map)) {
  390. return cfi16_to_cpu(val.x[0]);
  391. } else {
  392. /* No point in a 64-bit byteswap since that would just be
  393. swapping the responses from different chips, and we are
  394. only interested in one chip (a representative sample) */
  395. return cfi32_to_cpu(val.x[0]);
  396. }
  397. }
  398. static inline void cfi_udelay(int us)
  399. {
  400. if (us >= 1000) {
  401. msleep((us+999)/1000);
  402. } else {
  403. udelay(us);
  404. cond_resched();
  405. }
  406. }
/* Read an extended (PRI/ALT) query table of 'size' bytes from flash
 * offset 'adr'.  NOTE(review): allocation/ownership of the returned
 * buffer is defined by the implementation (.c file) -- confirm there. */
struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
		const char* name);

/* One fixup-table entry: 'fixup' is applied (with 'param') to devices
 * whose manufacturer/device IDs match 'mfr'/'id'; presumably
 * CFI_MFR_ANY / CFI_ID_ANY act as wildcards -- see cfi_fixup(). */
struct cfi_fixup {
	uint16_t mfr;
	uint16_t id;
	void (*fixup)(struct mtd_info *mtd, void* param);
	void* param;		/* passed through to the fixup callback */
};
#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY  0xffff

/* CFI manufacturer IDs */
#define CFI_MFR_AMD 0x0001
#define CFI_MFR_ATMEL 0x001F
#define CFI_MFR_ST  0x0020 	/* STMicroelectronics */

/* Apply the matching entries of a fixup table to 'mtd'. */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);

/* Callback type used by cfi_varsize_frob() for per-range operations. */
typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk);
  425. #endif /* __MTD_CFI_H__ */