cfi_cmdset_0002.c

/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3
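/*
 * Note: if a word program fails verification in do_write_oneword() below,
 * the chip is reset and the write is retried, up to MAX_WORD_RETRIES
 * further attempts, before -EIO is returned.
 */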
#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B		0x0060
#define SST49LF008A		0x005a

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

#include "fwh_lock.h"

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif
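
/*
 * For reference (values used above and in cfi_cmdset_0002() below): the
 * extended query TopBottom field encodes the boot block location, where
 * 2 means bottom boot and 3 means top boot.  The other encodings listed
 * in cfi_tell_features() are not acted upon by this driver.
 */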

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
	}
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common.  It seems likely that the device IDs are as well.
	 * This table is used to pick all the cases where we know
	 * that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ 0, 0, NULL, NULL }
};


struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}

		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
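		/*
		 * Example: an x16 chip wired up in x8 mode sees byte
		 * addresses rather than word addresses, so the word-mode
		 * unlock address 0x555 is presented on the bus as byte
		 * address 0xaaa, and the second unlock cycle moves from
		 * 0x2aa to 0x555.  The same shift applies to an x32 part
		 * operated in x16 mode.
		 */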
		if (	/* x16 in x8 mode */
			((cfi->device_type == CFI_DEVICETYPE_X8) &&
			 (cfi->cfiq->InterfaceDesc == 2)) ||
			/* x32 in x16 mode */
			((cfi->device_type == CFI_DEVICETYPE_X16) &&
			 (cfi->cfiq->InterfaceDesc == 4)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips,
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
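
/*
 * Illustrative note (not driver code): while a program or erase operation
 * is in progress, AMD-style chips toggle status bit DQ6 on every read of
 * the busy address, so two back-to-back reads differ:
 *
 *	d = map_read(map, addr);	// e.g. DQ6 == 0
 *	t = map_read(map, addr);	// e.g. DQ6 == 1 -> still busy
 *
 * Once the operation completes, the chip returns stable array data and
 * the two reads match, which is exactly what chip_ready() tests.
 */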

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips,
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(   mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
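
/*
 * Typical get_chip()/put_chip() usage in this file (illustrative sketch
 * only; the real call sites below add their own command sequences):
 *
 *	spin_lock(chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);
 *	if (ret) {
 *		spin_unlock(chip->mutex);
 *		return ret;
 *	}
 *	... issue commands, poll with chip_ready()/chip_good() ...
 *	chip->state = FL_READY;
 *	put_chip(map, chip, adr);
 *	spin_unlock(chip->mutex);
 *
 * get_chip() must be entered with chip->mutex held; on failure it is the
 * caller's job to drop the lock.
 */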

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
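
/*
 * SecSi ("Secured Silicon") is a small one-time-programmable region on
 * AMD parts, exposed here through the user/factory protection register
 * hooks.  Entry and exit use ordinary unlock cycles (illustrative summary
 * of the sequence issued below):
 *
 *	0xAA @ unlock1, 0x55 @ unlock2, 0x88 @ unlock1   -> enter SecSi
 *	... read the data with map_copy_from() ...
 *	0xAA @ unlock1, 0x55 @ unlock2, 0x90 @ unlock1,
 *	0x00 @ unlock1                                   -> exit SecSi
 */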
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
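
/*
 * Single-word programming, as used by do_write_oneword() below, follows
 * the standard four-cycle AMD command sequence (illustrative summary):
 *
 *	0xAA @ unlock1, 0x55 @ unlock2, 0xA0 @ unlock1, <datum> @ adr
 *
 * after which the chip programs autonomously and completion is detected
 * by polling with chip_ready()/chip_good().
 */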
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
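
/*
 * Buffered programming, as used by do_write_buffer() below, replaces the
 * per-word 0xA0 cycle with a Write-to-Buffer sequence (illustrative
 * summary):
 *
 *	0xAA @ unlock1, 0x55 @ unlock2		unlock cycles
 *	0x25 @ sector				Write Buffer Load
 *	(word count - 1) @ sector
 *	<data words> @ their addresses
 *	0x29 @ sector				Write Buffer Program Confirm
 *
 * Completion is again detected by polling with chip_ready().
 */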
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
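
/*
 * Chip erase uses the six-cycle AMD command sequence (illustrative summary
 * of what do_erase_chip() issues below):
 *
 *	0xAA @ unlock1, 0x55 @ unlock2, 0x80 @ unlock1,
 *	0xAA @ unlock1, 0x55 @ unlock2, 0x10 @ unlock1
 *
 * The chip then erases itself; the generous HZ*20 software timeout below
 * reflects how long a full chip erase can take.
 */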
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
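
/*
 * Sector erase differs from chip erase only in the final cycle: instead of
 * 0x10 at the unlock address, 0x30 is written to an address within the
 * sector to be erased (illustrative summary of what do_erase_oneblock()
 * issues below):
 *
 *	0xAA @ unlock1, 0x55 @ unlock2, 0x80 @ unlock1,
 *	0xAA @ unlock1, 0x55 @ unlock2, 0x30 @ sector address
 */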
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name[] = "cfi_cmdset_0002";


static int __init cfi_amdstd_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
	return 0;
}


static void __exit cfi_amdstd_exit(void)
{
	inter_module_unregister(im_name);
}


module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");