/* linux/include/linux/mtd/cfi.h */
  1. /* Common Flash Interface structures
  2. * See http://support.intel.com/design/flash/technote/index.htm
  3. */
  4. #ifndef __MTD_CFI_H__
  5. #define __MTD_CFI_H__
  6. #include <linux/delay.h>
  7. #include <linux/types.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/mtd/flashchip.h>
  10. #include <linux/mtd/map.h>
  11. #include <linux/mtd/cfi_endian.h>
  12. #include <linux/mtd/xip.h>
/*
 * cfi_interleave(cfi) evaluates to the number of identical flash chips
 * wired in parallel on the bus.  When exactly one CONFIG_MTD_CFI_Ix
 * option is enabled it collapses to that compile-time constant; when
 * more than one is enabled, the later blocks below redefine it to read
 * the runtime value from struct cfi_private.  The cfi_interleave_is_N()
 * predicates fold to constant 0 for interleaves that are configured
 * out, letting the compiler discard the dead code paths.
 */
#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
# undef cfi_interleave
# define cfi_interleave(cfi) ((cfi)->interleave)
# else
# define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif

#ifndef cfi_interleave
#warning No CONFIG_MTD_CFI_Ix selected. No NOR chip support can work.
/* Fallback so callers still compile; reaching it at runtime is a hard bug. */
static inline int cfi_interleave(void *cfi)
{
	BUG();
	return 0;
}
#endif
/*
 * Returns non-zero if interleave factor 'i' was enabled in the kernel
 * configuration.  Every compiled-in case label deliberately falls
 * through to the shared 'return 1'.
 */
static inline int cfi_interleave_supported(int i)
{
	switch (i) {
#ifdef CONFIG_MTD_CFI_I1
	case 1:
#endif
#ifdef CONFIG_MTD_CFI_I2
	case 2:
#endif
#ifdef CONFIG_MTD_CFI_I4
	case 4:
#endif
#ifdef CONFIG_MTD_CFI_I8
	case 8:
#endif
		return 1;

	default:
		return 0;
	}
}
/* NB: these values must represent the number of bytes needed to meet the
 * device type (x8, x16, x32). Eg. a 32 bit device is 4 x 8-bit bytes.
 * These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8 (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)

/* Device Interface Code Assignments from the "Common Flash Memory Interface
 * Publication 100" dated December 1, 2001.
 */
#define CFI_INTERFACE_X8_ASYNC 0x0000
#define CFI_INTERFACE_X16_ASYNC 0x0001
#define CFI_INTERFACE_X8_BY_X16_ASYNC 0x0002
#define CFI_INTERFACE_X32_ASYNC 0x0003
#define CFI_INTERFACE_X16_BY_X32_ASYNC 0x0005
#define CFI_INTERFACE_NOT_ALLOWED 0xffff

/* NB: We keep these structures in memory in HOST byteorder, except
 * where individually noted.
 */
  100. /* Basic Query Structure */
  101. struct cfi_ident {
  102. uint8_t qry[3];
  103. uint16_t P_ID;
  104. uint16_t P_ADR;
  105. uint16_t A_ID;
  106. uint16_t A_ADR;
  107. uint8_t VccMin;
  108. uint8_t VccMax;
  109. uint8_t VppMin;
  110. uint8_t VppMax;
  111. uint8_t WordWriteTimeoutTyp;
  112. uint8_t BufWriteTimeoutTyp;
  113. uint8_t BlockEraseTimeoutTyp;
  114. uint8_t ChipEraseTimeoutTyp;
  115. uint8_t WordWriteTimeoutMax;
  116. uint8_t BufWriteTimeoutMax;
  117. uint8_t BlockEraseTimeoutMax;
  118. uint8_t ChipEraseTimeoutMax;
  119. uint8_t DevSize;
  120. uint16_t InterfaceDesc;
  121. uint16_t MaxBufWriteSize;
  122. uint8_t NumEraseRegions;
  123. uint32_t EraseRegionInfo[0]; /* Not host ordered */
  124. } __attribute__((packed));
/* Extended Query Structure for both PRI and ALT */
struct cfi_extquery {
	uint8_t pri[3];		/* "PRI" (or "ALT") identification string */
	uint8_t MajorVersion;	/* extended table format major version */
	uint8_t MinorVersion;	/* extended table format minor version */
} __attribute__((packed));
  131. /* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
  132. struct cfi_pri_intelext {
  133. uint8_t pri[3];
  134. uint8_t MajorVersion;
  135. uint8_t MinorVersion;
  136. uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
  137. block follows - FIXME - not currently supported */
  138. uint8_t SuspendCmdSupport;
  139. uint16_t BlkStatusRegMask;
  140. uint8_t VccOptimal;
  141. uint8_t VppOptimal;
  142. uint8_t NumProtectionFields;
  143. uint16_t ProtRegAddr;
  144. uint8_t FactProtRegSize;
  145. uint8_t UserProtRegSize;
  146. uint8_t extra[0];
  147. } __attribute__((packed));
/* Intel extended-query descriptor for one set of OTP protection
 * registers (factory- and user-programmable groups). */
struct cfi_intelext_otpinfo {
	uint32_t ProtRegAddr;		/* base address of the protection registers */
	uint16_t FactGroups;		/* number of factory-programmed groups */
	uint8_t FactProtRegSize;
	uint16_t UserGroups;		/* number of user-programmable groups */
	uint8_t UserProtRegSize;
} __attribute__((packed));
/* Intel extended-query descriptor for one run of identical erase blocks. */
struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;	/* number of identical blocks in this run */
	uint16_t BlockSize;
	uint16_t MinBlockEraseCycles;
	uint8_t BitsPerCell;
	uint8_t BlockCap;
} __attribute__((packed));
/* Intel extended-query descriptor for one partition region.
 * NOTE: BlockTypes[1] is the "at least one element" idiom — the real
 * count is NumBlockTypes and the data is parsed in place from the
 * query response; do not convert to a flexible array member, as that
 * would change sizeof() of this struct. */
struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;
	uint8_t NumOpAllowed;
	uint8_t NumOpAllowedSimProgMode;
	uint8_t NumOpAllowedSimEraMode;
	uint8_t NumBlockTypes;
	struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));
/* Intel extended-query descriptor for programming-region geometry. */
struct cfi_intelext_programming_regioninfo {
	uint8_t ProgRegShift;		/* log2 of the programming region size — TODO confirm */
	uint8_t Reserved1;
	uint8_t ControlValid;
	uint8_t Reserved2;
	uint8_t ControlInvalid;
	uint8_t Reserved3;
} __attribute__((packed));
/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
struct cfi_pri_amdstd {
	uint8_t pri[3];			/* "PRI" identification string */
	uint8_t MajorVersion;
	uint8_t MinorVersion;
	uint8_t SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
	uint8_t EraseSuspend;
	uint8_t BlkProt;
	uint8_t TmpBlkUnprotect;
	uint8_t BlkProtUnprot;
	uint8_t SimultaneousOps;
	uint8_t BurstMode;
	uint8_t PageMode;
	uint8_t VppMin;
	uint8_t VppMax;
	uint8_t TopBottom;		/* NOTE(review): presumably boot-block location; confirm in cmdset driver */
} __attribute__((packed));
/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
struct cfi_pri_atmel {
	uint8_t pri[3];		/* "PRI" identification string */
	uint8_t MajorVersion;
	uint8_t MinorVersion;
	uint8_t Features;
	uint8_t BottomBoot;
	uint8_t BurstMode;
	uint8_t PageMode;
} __attribute__((packed));
/* Protection-register query; ProtField[1] is the "at least one"
 * idiom — the real element count is NumFields. */
struct cfi_pri_query {
	uint8_t NumFields;
	uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));
/* Burst-read interface query; ConfField[1] is the "at least one"
 * idiom — the real element count is NumFields. */
struct cfi_bri_query {
	uint8_t PageModeReadCap;
	uint8_t NumFields;
	uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));
/* Vendor command set IDs, as reported in the P_ID/A_ID query fields. */
#define P_ID_NONE 0x0000
#define P_ID_INTEL_EXT 0x0001
#define P_ID_AMD_STD 0x0002
#define P_ID_INTEL_STD 0x0003
#define P_ID_AMD_EXT 0x0004
#define P_ID_WINBOND 0x0006
#define P_ID_ST_ADV 0x0020
#define P_ID_MITSUBISHI_STD 0x0100
#define P_ID_MITSUBISHI_EXT 0x0101
#define P_ID_SST_PAGE 0x0102
#define P_ID_INTEL_PERFORMANCE 0x0200
#define P_ID_INTEL_DATA 0x0210
#define P_ID_RESERVED 0xffff

/* How the chip was identified: a real CFI query, or JEDEC ID probing. */
#define CFI_MODE_CFI 1
#define CFI_MODE_JEDEC 0
  229. struct cfi_private {
  230. uint16_t cmdset;
  231. void *cmdset_priv;
  232. int interleave;
  233. int device_type;
  234. int cfi_mode; /* Are we a JEDEC device pretending to be CFI? */
  235. int addr_unlock1;
  236. int addr_unlock2;
  237. struct mtd_info *(*cmdset_setup)(struct map_info *);
  238. struct cfi_ident *cfiq; /* For now only one. We insist that all devs
  239. must be of the same type. */
  240. int mfr, id;
  241. int numchips;
  242. unsigned long chipshift; /* Because they're of the same type */
  243. const char *im_name; /* inter_module name for cmdset_setup */
  244. struct flchip chips[0]; /* per-chip data structure for each chip */
  245. };
  246. /*
  247. * Returns the command address according to the given geometry.
  248. */
  249. static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
  250. struct map_info *map, struct cfi_private *cfi)
  251. {
  252. unsigned bankwidth = map_bankwidth(map);
  253. unsigned interleave = cfi_interleave(cfi);
  254. unsigned type = cfi->device_type;
  255. uint32_t addr;
  256. addr = (cmd_ofs * type) * interleave;
  257. /* Modify the unlock address if we are in compatiblity mode.
  258. * For 16bit devices on 8 bit busses
  259. * and 32bit devices on 16 bit busses
  260. * set the low bit of the alternating bit sequence of the address.
  261. */
  262. if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa))
  263. addr |= (type >> 1)*interleave;
  264. return addr;
  265. }
/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	/* chip_mode: bytes each individual chip contributes per bus cycle */
	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate.  The case labels are
	   deliberate fall-throughs: each doubling builds on the last. */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
/* Shorthand used by the cmdset drivers; expects 'map' and 'cfi' in scope. */
#define CMD(x) cfi_build_cmd((x), map, cfi)
/*
 * Inverse of cfi_build_cmd(): collapse a status value read from the whole
 * interleaved bank into a single chip's worth of status bits, by OR-ing
 * together the per-chip lanes and converting back to CPU byte order.
 */
static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
		struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;
	/* Deliberate fall-throughs: each step folds in the lanes from
	   half the remaining chips. */
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
		/* fall through */
	case 2:
		res |= (onestat >> (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(res);
		break;
	case 4:
		res = cfi32_to_cpu(res);
		break;
	default: BUG();
	}

	return res;
}
/* Shorthand used by the cmdset drivers; expects 'map' and 'cfi' in scope. */
#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
  381. /*
  382. * Sends a CFI command to a bank of flash for the given geometry.
  383. *
  384. * Returns the offset in flash where the command was written.
  385. * If prev_val is non-null, it will be set to the value at the command address,
  386. * before the command was written.
  387. */
  388. static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
  389. struct map_info *map, struct cfi_private *cfi,
  390. int type, map_word *prev_val)
  391. {
  392. map_word val;
  393. uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
  394. val = cfi_build_cmd(cmd, map, cfi);
  395. if (prev_val)
  396. *prev_val = map_read(map, addr);
  397. map_write(map, val, addr);
  398. return addr - base;
  399. }
  400. static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
  401. {
  402. map_word val = map_read(map, addr);
  403. if (map_bankwidth_is_1(map)) {
  404. return val.x[0];
  405. } else if (map_bankwidth_is_2(map)) {
  406. return cfi16_to_cpu(val.x[0]);
  407. } else {
  408. /* No point in a 64-bit byteswap since that would just be
  409. swapping the responses from different chips, and we are
  410. only interested in one chip (a representative sample) */
  411. return cfi32_to_cpu(val.x[0]);
  412. }
  413. }
  414. static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
  415. {
  416. map_word val = map_read(map, addr);
  417. if (map_bankwidth_is_1(map)) {
  418. return val.x[0] & 0xff;
  419. } else if (map_bankwidth_is_2(map)) {
  420. return cfi16_to_cpu(val.x[0]);
  421. } else {
  422. /* No point in a 64-bit byteswap since that would just be
  423. swapping the responses from different chips, and we are
  424. only interested in one chip (a representative sample) */
  425. return cfi32_to_cpu(val.x[0]);
  426. }
  427. }
  428. static inline void cfi_udelay(int us)
  429. {
  430. if (us >= 1000) {
  431. msleep((us+999)/1000);
  432. } else {
  433. udelay(us);
  434. cond_resched();
  435. }
  436. }
/* Enter/leave/check CFI query mode on the chip bank at 'base'.
 * Implemented out of line; __xipram relates to XIP support (see
 * <linux/mtd/xip.h>). */
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi);
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi);
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi);

/* Read an extended (PRI/ALT) query table of 'size' bytes at 'adr';
 * 'name' is used for diagnostics. */
struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
				  const char* name);

/* One entry in a table of per-chip quirk fixups, matched against the
 * probed manufacturer/device IDs (CFI_MFR_ANY / CFI_ID_ANY wildcard). */
struct cfi_fixup {
	uint16_t mfr;		/* manufacturer ID to match, or CFI_MFR_ANY */
	uint16_t id;		/* device ID to match, or CFI_ID_ANY */
	void (*fixup)(struct mtd_info *mtd, void* param);
	void* param;		/* opaque argument passed through to fixup() */
};

#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY 0xffff

/* Manufacturer IDs */
#define CFI_MFR_AMD 0x0001
#define CFI_MFR_INTEL 0x0089
#define CFI_MFR_ATMEL 0x001F
#define CFI_MFR_SAMSUNG 0x00EC
#define CFI_MFR_ST 0x0020 /* STMicroelectronics */

/* Apply every matching entry of a CFI_MFR_ANY/CFI_ID_ANY-terminated
 * fixup table to 'mtd'. */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);

/* Callback type used by cfi_varsize_frob() while walking a range. */
typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk);

#endif /* __MTD_CFI_H__ */