/* Common Flash Interface structures
 * See http://support.intel.com/design/flash/technote/index.htm
 */

#ifndef __MTD_CFI_H__
#define __MTD_CFI_H__

#include <linux/delay.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>
#include <linux/mtd/xip.h>
#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif

#ifndef cfi_interleave
#warning No CONFIG_MTD_CFI_Ix selected. No NOR chip support can work.
static inline int cfi_interleave(void *cfi)
{
	BUG();
	return 0;
}
#endif
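
/*
 * Note on geometry (illustrative): "interleave" is the number of identical
 * chips wired side by side on the data bus.  For example, two x16 chips
 * forming a 32-bit bus give cfi_interleave(cfi) == 2 with
 * cfi->device_type == CFI_DEVICETYPE_X16.  When only one CONFIG_MTD_CFI_Ix
 * option is enabled, cfi_interleave() collapses to a compile-time constant;
 * when several are enabled it reads cfi->interleave at run time.
 */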

/* Check whether the given interleave was compiled in. */
static inline int cfi_interleave_supported(int i)
{
	switch (i) {
#ifdef CONFIG_MTD_CFI_I1
	case 1:
#endif
#ifdef CONFIG_MTD_CFI_I2
	case 2:
#endif
#ifdef CONFIG_MTD_CFI_I4
	case 4:
#endif
#ifdef CONFIG_MTD_CFI_I8
	case 8:
#endif
		return 1;

	default:
		return 0;
	}
}

/* NB: these values must represent the number of bytes needed to meet the
 *     device type (x8, x16, x32).  Eg. a 32 bit device is 4 x 8 bits,
 *     i.e. 4 bytes wide.  These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8  (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)

/* Device Interface Code Assignments from the "Common Flash Memory Interface
 * Publication 100" dated December 1, 2001.
 */
#define CFI_INTERFACE_X8_ASYNC		0x0000
#define CFI_INTERFACE_X16_ASYNC		0x0001
#define CFI_INTERFACE_X8_BY_X16_ASYNC	0x0002
#define CFI_INTERFACE_X32_ASYNC		0x0003
#define CFI_INTERFACE_X16_BY_X32_ASYNC	0x0005
#define CFI_INTERFACE_NOT_ALLOWED	0xffff

/* NB: We keep these structures in memory in HOST byteorder, except
 * where individually noted.
 */

/* Basic Query Structure */
struct cfi_ident {
	uint8_t  qry[3];
	uint16_t P_ID;
	uint16_t P_ADR;
	uint16_t A_ID;
	uint16_t A_ADR;
	uint8_t  VccMin;
	uint8_t  VccMax;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  WordWriteTimeoutTyp;
	uint8_t  BufWriteTimeoutTyp;
	uint8_t  BlockEraseTimeoutTyp;
	uint8_t  ChipEraseTimeoutTyp;
	uint8_t  WordWriteTimeoutMax;
	uint8_t  BufWriteTimeoutMax;
	uint8_t  BlockEraseTimeoutMax;
	uint8_t  ChipEraseTimeoutMax;
	uint8_t  DevSize;
	uint16_t InterfaceDesc;
	uint16_t MaxBufWriteSize;
	uint8_t  NumEraseRegions;
	uint32_t EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));
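
/*
 * Reading the query fields above (per the CFI specification; illustrative):
 * most size and timing fields are stored as log2 values.  E.g. a chip
 * reporting DevSize == 0x15 is 1 << 0x15 == 2 MiB, MaxBufWriteSize == 0x05
 * means a 32-byte write buffer, and the *TimeoutTyp fields give typical
 * times as 1 << n (microseconds for word/buffer writes, milliseconds for
 * erases), with the *TimeoutMax fields being 1 << n multipliers on the
 * typical value.
 */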

/* Extended Query Structure for both PRI and ALT */

struct cfi_extquery {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
} __attribute__((packed));

/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */

struct cfi_pri_intelext {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t
				    feature block follows - FIXME - not currently
				    supported */
	uint8_t  SuspendCmdSupport;
	uint16_t BlkStatusRegMask;
	uint8_t  VccOptimal;
	uint8_t  VppOptimal;
	uint8_t  NumProtectionFields;
	uint16_t ProtRegAddr;
	uint8_t  FactProtRegSize;
	uint8_t  UserProtRegSize;
	uint8_t  extra[0];
} __attribute__((packed));

struct cfi_intelext_otpinfo {
	uint32_t ProtRegAddr;
	uint16_t FactGroups;
	uint8_t  FactProtRegSize;
	uint16_t UserGroups;
	uint8_t  UserProtRegSize;
} __attribute__((packed));

struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;
	uint16_t BlockSize;
	uint16_t MinBlockEraseCycles;
	uint8_t  BitsPerCell;
	uint8_t  BlockCap;
} __attribute__((packed));

struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;
	uint8_t  NumOpAllowed;
	uint8_t  NumOpAllowedSimProgMode;
	uint8_t  NumOpAllowedSimEraMode;
	uint8_t  NumBlockTypes;
	struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));

struct cfi_intelext_programming_regioninfo {
	uint8_t  ProgRegShift;
	uint8_t  Reserved1;
	uint8_t  ControlValid;
	uint8_t  Reserved2;
	uint8_t  ControlInvalid;
	uint8_t  Reserved3;
} __attribute__((packed));

/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */

struct cfi_pri_amdstd {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint8_t  SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
	uint8_t  EraseSuspend;
	uint8_t  BlkProt;
	uint8_t  TmpBlkUnprotect;
	uint8_t  BlkProtUnprot;
	uint8_t  SimultaneousOps;
	uint8_t  BurstMode;
	uint8_t  PageMode;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  TopBottom;
} __attribute__((packed));

/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */

struct cfi_pri_atmel {
	uint8_t pri[3];
	uint8_t MajorVersion;
	uint8_t MinorVersion;
	uint8_t Features;
	uint8_t BottomBoot;
	uint8_t BurstMode;
	uint8_t PageMode;
} __attribute__((packed));

struct cfi_pri_query {
	uint8_t  NumFields;
	uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));

struct cfi_bri_query {
	uint8_t  PageModeReadCap;
	uint8_t  NumFields;
	uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));

#define P_ID_NONE		0x0000
#define P_ID_INTEL_EXT		0x0001
#define P_ID_AMD_STD		0x0002
#define P_ID_INTEL_STD		0x0003
#define P_ID_AMD_EXT		0x0004
#define P_ID_WINBOND		0x0006
#define P_ID_ST_ADV		0x0020
#define P_ID_MITSUBISHI_STD	0x0100
#define P_ID_MITSUBISHI_EXT	0x0101
#define P_ID_SST_PAGE		0x0102
#define P_ID_SST_OLD		0x0701
#define P_ID_INTEL_PERFORMANCE	0x0200
#define P_ID_INTEL_DATA		0x0210
#define P_ID_RESERVED		0xffff

#define CFI_MODE_CFI	1
#define CFI_MODE_JEDEC	0

struct cfi_private {
	uint16_t cmdset;
	void *cmdset_priv;
	int interleave;
	int device_type;
	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
	int addr_unlock1;
	int addr_unlock2;
	struct mtd_info *(*cmdset_setup)(struct map_info *);
	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
				   must be of the same type. */
	int mfr, id;
	int numchips;
	unsigned long chipshift; /* Because they're of the same type */
	const char *im_name;	 /* inter_module name for cmdset_setup */
	struct flchip chips[0];  /* per-chip data structure for each chip */
};

/*
 * Returns the command address according to the given geometry.
 */
static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
				struct map_info *map, struct cfi_private *cfi)
{
	unsigned bankwidth = map_bankwidth(map);
	unsigned interleave = cfi_interleave(cfi);
	unsigned type = cfi->device_type;
	uint32_t addr;

	addr = (cmd_ofs * type) * interleave;

	/* Modify the unlock address if we are in compatibility mode.
	 * For 16bit devices on 8 bit busses
	 * and 32bit devices on 16 bit busses
	 * set the low bit of the alternating bit sequence of the address.
	 */
	if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
		addr |= (type >> 1) * interleave;

	return addr;
}
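
/*
 * Worked example (illustrative): an x16 chip driven in 8-bit compatibility
 * mode has type == 2, interleave == 1, bankwidth == 1.  The AMD unlock
 * offset 0x2aa then maps to (0x2aa * 2) | 1 == 0x555, while 0x555 maps to
 * 0xaaa -- the byte-mode unlock addresses such chips expect.
 */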

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i = 0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
#define CMD(x) cfi_build_cmd((x), map, cfi)
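
/*
 * Example (illustrative): with two x8 chips interleaved on a 16-bit bus,
 * chip_mode == 1 and chips_per_word == 2, so CMD(0x98) yields the map_word
 * 0x9898 -- the same command byte presented to both chips at once.  On a
 * plain 8-bit bus with a single x8 chip it stays 0x98.
 */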

static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
					     struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i = 1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
		/* fall through */
	case 2:
		res |= (onestat >> (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(res);
		break;
	case 4:
		res = cfi32_to_cpu(res);
		break;
	default: BUG();
	}

	return res;
}

#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
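
/*
 * Example (illustrative): reading two interleaved x8 chips over a 16-bit bus
 * might return val.x[0] == 0x0180.  cfi_merge_status() folds that to
 * 0x0180 | (0x0180 >> 8), whose low byte 0x81 carries the OR of both chips'
 * status bits, so callers can test a single chip-width value regardless of
 * the interleave.
 */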

/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command address,
 * before the command was written.
 */
static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
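
/*
 * Typical usage (sketch, in the style of the AMD/Fujitsu command-set
 * driver): the three-cycle unlock sequence before a program command can be
 * issued as
 *
 *	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
 *
 * with each call building the geometry-correct address and data pattern for
 * the bank mapped at 'base'.
 */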

static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0];
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(val.x[0]);
	}
}

static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0] & 0xff;
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(val.x[0]);
	}
}

/* Delay for 'us' microseconds: sleep for long delays, busy-wait (with a
   scheduling point) for short ones. */
static inline void cfi_udelay(int us)
{
	if (us >= 1000) {
		msleep((us+999)/1000);
	} else {
		udelay(us);
		cond_resched();
	}
}

int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi);
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi);
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi);

struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
				  const char* name);

struct cfi_fixup {
	uint16_t mfr;
	uint16_t id;
	void (*fixup)(struct mtd_info *mtd, void* param);
	void* param;
};

#define CFI_MFR_ANY		0xFFFF
#define CFI_ID_ANY		0xFFFF
#define CFI_MFR_CONTINUATION	0x007F

#define CFI_MFR_AMD		0x0001
#define CFI_MFR_ATMEL		0x001F
#define CFI_MFR_EON		0x001C
#define CFI_MFR_FUJITSU		0x0004
#define CFI_MFR_HYUNDAI		0x00AD
#define CFI_MFR_INTEL		0x0089
#define CFI_MFR_MACRONIX	0x00C2
#define CFI_MFR_NEC		0x0010
#define CFI_MFR_PMC		0x009D
#define CFI_MFR_SAMSUNG		0x00EC
#define CFI_MFR_SHARP		0x00B0
#define CFI_MFR_SST		0x00BF
#define CFI_MFR_ST		0x0020	/* STMicroelectronics */
#define CFI_MFR_TOSHIBA		0x0098
#define CFI_MFR_WINBOND		0x00DA

void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
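
/*
 * Sketch of how a command-set driver uses cfi_fixup(): it passes a table
 * terminated by an all-zero entry, and each entry whose mfr/id match the
 * probed chip (CFI_MFR_ANY / CFI_ID_ANY act as wildcards) has its handler
 * called.  'fixup_my_chip' below is a hypothetical handler, not part of
 * this header:
 *
 *	static struct cfi_fixup my_fixup_table[] = {
 *		{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_my_chip, NULL },
 *		{ 0, 0, NULL, NULL }
 *	};
 *	...
 *	cfi_fixup(mtd, my_fixup_table);
 */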

typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk);

#endif /* __MTD_CFI_H__ */