cfi_cmdset_0002.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550
  1. /*
  2. * Common Flash Interface support:
  3. * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
  4. *
  5. * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
  6. * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
  7. *
  8. * 2_by_8 routines added by Simon Munton
  9. *
  10. * 4_by_16 work by Carolyn J. Smith
  11. *
  12. * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
  13. *
  14. * This code is GPL
  15. *
  16. * $Id: cfi_cmdset_0002.c,v 1.115 2005/05/20 03:28:23 eric Exp $
  17. *
  18. */
  19. #include <linux/config.h>
  20. #include <linux/module.h>
  21. #include <linux/types.h>
  22. #include <linux/kernel.h>
  23. #include <linux/sched.h>
  24. #include <linux/init.h>
  25. #include <asm/io.h>
  26. #include <asm/byteorder.h>
  27. #include <linux/errno.h>
  28. #include <linux/slab.h>
  29. #include <linux/delay.h>
  30. #include <linux/interrupt.h>
  31. #include <linux/mtd/compatmac.h>
  32. #include <linux/mtd/map.h>
  33. #include <linux/mtd/mtd.h>
  34. #include <linux/mtd/cfi.h>
  35. #define AMD_BOOTLOC_BUG
  36. #define FORCE_WORD_WRITE 0
  37. #define MAX_WORD_RETRIES 3
  38. #define MANUFACTURER_AMD 0x0001
  39. #define MANUFACTURER_SST 0x00BF
  40. #define SST49LF004B 0x0060
  41. #define SST49LF008A 0x005a
/*
 * Forward declarations: the mtd_info operation implementations and the
 * chip-arbitration helpers defined later in this file.
 */
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static void cfi_amdstd_destroy(struct mtd_info *);

/* Probe entry point (referenced from the generic CFI probe code). */
struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

/* Per-chip access arbitration; see the definitions below for semantics. */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
  56. #include "fwh_lock.h"
/*
 * Chip driver descriptor registered with the MTD core.  This command set
 * is never probed directly (probe = NULL); it is attached by the generic
 * CFI/JEDEC probe via cfi_cmdset_0002().
 */
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
/*
 * cfi_tell_features - dump the AMD/Fujitsu extended query (PRI) table
 * in human-readable form.  Debug-only; compiled in when
 * DEBUG_CFI_FEATURES is defined above.
 */
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	/* Decode tables for the EraseSuspend and TopBottom PRI fields. */
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk(" Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	/* Bounds-check enumerated fields before indexing the tables. */
	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk(" Block protection: Not supported\n");
	else
		printk(" Block protection: %d sectors per group\n", extp->BlkProt);

	printk(" Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk(" Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk(" Page mode: Not supported\n");
	else
		printk(" Page mode: %d word page\n", extp->PageMode << 2);

	/* Vpp fields are BCD-ish: high nibble = volts, low nibble = tenths. */
	printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
/*
 * fixup_amd_bootblock - work around AMD chips whose PRI version predates
 * 1.1 and whose TopBottom field therefore cannot be trusted.  Derives the
 * boot-block location from the JEDEC device ID instead: IDs with bit 7
 * set are assumed top-boot (TopBottom = 3), others bottom-boot (= 2).
 */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	/* 0x3131 is ASCII "11", i.e. PRI version 1.1. */
	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif
  126. static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
  127. {
  128. struct map_info *map = mtd->priv;
  129. struct cfi_private *cfi = map->fldrv_priv;
  130. if (cfi->cfiq->BufWriteTimeoutTyp) {
  131. DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
  132. mtd->write = cfi_amdstd_write_buffers;
  133. }
  134. }
/*
 * fixup_use_secsi - route the OTP protection-register reads through the
 * SecSi (secured silicon) sector read routine for chips that have one.
 */
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}
  141. static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
  142. {
  143. struct map_info *map = mtd->priv;
  144. struct cfi_private *cfi = map->fldrv_priv;
  145. if ((cfi->cfiq->NumEraseRegions == 1) &&
  146. ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
  147. mtd->erase = cfi_amdstd_erase_chip;
  148. }
  149. }
/*
 * Fixups applied to chips detected via a real CFI query, matched by
 * (manufacturer, device) ID.  Terminated by the all-zero sentinel entry.
 */
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	/* AMD parts known to carry a SecSi sector. */
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
/*
 * Fixups applied only to chips detected via JEDEC probing: SST firmware
 * hub parts need the FWH locking scheme from fwh_lock.h.
 */
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ 0, 0, NULL, NULL }
};
  179. struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
  180. {
  181. struct cfi_private *cfi = map->fldrv_priv;
  182. struct mtd_info *mtd;
  183. int i;
  184. mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
  185. if (!mtd) {
  186. printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
  187. return NULL;
  188. }
  189. memset(mtd, 0, sizeof(*mtd));
  190. mtd->priv = map;
  191. mtd->type = MTD_NORFLASH;
  192. /* Fill in the default mtd operations */
  193. mtd->erase = cfi_amdstd_erase_varsize;
  194. mtd->write = cfi_amdstd_write_words;
  195. mtd->read = cfi_amdstd_read;
  196. mtd->sync = cfi_amdstd_sync;
  197. mtd->suspend = cfi_amdstd_suspend;
  198. mtd->resume = cfi_amdstd_resume;
  199. mtd->flags = MTD_CAP_NORFLASH;
  200. mtd->name = map->name;
  201. if (cfi->cfi_mode==CFI_MODE_CFI){
  202. unsigned char bootloc;
  203. /*
  204. * It's a real CFI chip, not one for which the probe
  205. * routine faked a CFI structure. So we read the feature
  206. * table from it.
  207. */
  208. __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
  209. struct cfi_pri_amdstd *extp;
  210. extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
  211. if (!extp) {
  212. kfree(mtd);
  213. return NULL;
  214. }
  215. /* Install our own private info structure */
  216. cfi->cmdset_priv = extp;
  217. /* Apply cfi device specific fixups */
  218. cfi_fixup(mtd, cfi_fixup_table);
  219. #ifdef DEBUG_CFI_FEATURES
  220. /* Tell the user about it in lots of lovely detail */
  221. cfi_tell_features(extp);
  222. #endif
  223. bootloc = extp->TopBottom;
  224. if ((bootloc != 2) && (bootloc != 3)) {
  225. printk(KERN_WARNING "%s: CFI does not contain boot "
  226. "bank location. Assuming top.\n", map->name);
  227. bootloc = 2;
  228. }
  229. if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
  230. printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
  231. for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
  232. int j = (cfi->cfiq->NumEraseRegions-1)-i;
  233. __u32 swap;
  234. swap = cfi->cfiq->EraseRegionInfo[i];
  235. cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
  236. cfi->cfiq->EraseRegionInfo[j] = swap;
  237. }
  238. }
  239. /* Set the default CFI lock/unlock addresses */
  240. cfi->addr_unlock1 = 0x555;
  241. cfi->addr_unlock2 = 0x2aa;
  242. /* Modify the unlock address if we are in compatibility mode */
  243. if ( /* x16 in x8 mode */
  244. ((cfi->device_type == CFI_DEVICETYPE_X8) &&
  245. (cfi->cfiq->InterfaceDesc == 2)) ||
  246. /* x32 in x16 mode */
  247. ((cfi->device_type == CFI_DEVICETYPE_X16) &&
  248. (cfi->cfiq->InterfaceDesc == 4)))
  249. {
  250. cfi->addr_unlock1 = 0xaaa;
  251. cfi->addr_unlock2 = 0x555;
  252. }
  253. } /* CFI mode */
  254. else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
  255. /* Apply jedec specific fixups */
  256. cfi_fixup(mtd, jedec_fixup_table);
  257. }
  258. /* Apply generic fixups */
  259. cfi_fixup(mtd, fixup_table);
  260. for (i=0; i< cfi->numchips; i++) {
  261. cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
  262. cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
  263. cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
  264. }
  265. map->fldrv = &cfi_amdstd_chipdrv;
  266. return cfi_amdstd_setup(mtd);
  267. }
  268. static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
  269. {
  270. struct map_info *map = mtd->priv;
  271. struct cfi_private *cfi = map->fldrv_priv;
  272. unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
  273. unsigned long offset = 0;
  274. int i,j;
  275. printk(KERN_NOTICE "number of %s chips: %d\n",
  276. (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
  277. /* Select the correct geometry setup */
  278. mtd->size = devsize * cfi->numchips;
  279. mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
  280. mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
  281. * mtd->numeraseregions, GFP_KERNEL);
  282. if (!mtd->eraseregions) {
  283. printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
  284. goto setup_err;
  285. }
  286. for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
  287. unsigned long ernum, ersize;
  288. ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
  289. ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
  290. if (mtd->erasesize < ersize) {
  291. mtd->erasesize = ersize;
  292. }
  293. for (j=0; j<cfi->numchips; j++) {
  294. mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
  295. mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
  296. mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
  297. }
  298. offset += (ersize * ernum);
  299. }
  300. if (offset != devsize) {
  301. /* Argh */
  302. printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
  303. goto setup_err;
  304. }
  305. #if 0
  306. // debug
  307. for (i=0; i<mtd->numeraseregions;i++){
  308. printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
  309. i,mtd->eraseregions[i].offset,
  310. mtd->eraseregions[i].erasesize,
  311. mtd->eraseregions[i].numblocks);
  312. }
  313. #endif
  314. /* FIXME: erase-suspend-program is broken. See
  315. http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
  316. printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
  317. __module_get(THIS_MODULE);
  318. return mtd;
  319. setup_err:
  320. if(mtd) {
  321. if(mtd->eraseregions)
  322. kfree(mtd->eraseregions);
  323. kfree(mtd);
  324. }
  325. kfree(cfi->cmdset_priv);
  326. kfree(cfi->cfiq);
  327. return NULL;
  328. }
  329. /*
  330. * Return true if the chip is ready.
  331. *
  332. * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
  333. * non-suspended sector) and is indicated by no toggle bits toggling.
  334. *
  335. * Note that anything more complicated than checking if no bits are toggling
  336. * (including checking DQ5 for an error status) is tricky to get working
  337. * correctly and is therefore not done (particulary with interleaved chips
  338. * as each chip must be checked independantly of the others).
  339. */
  340. static int chip_ready(struct map_info *map, unsigned long addr)
  341. {
  342. map_word d, t;
  343. d = map_read(map, addr);
  344. t = map_read(map, addr);
  345. return map_word_equal(map, d, t);
  346. }
  347. /*
  348. * Return true if the chip is ready and has the correct value.
  349. *
  350. * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
  351. * non-suspended sector) and it is indicated by no bits toggling.
  352. *
  353. * Error are indicated by toggling bits or bits held with the wrong value,
  354. * or with bits toggling.
  355. *
  356. * Note that anything more complicated than checking if no bits are toggling
  357. * (including checking DQ5 for an error status) is tricky to get working
  358. * correctly and is therefore not done (particulary with interleaved chips
  359. * as each chip must be checked independantly of the others).
  360. *
  361. */
  362. static int chip_good(struct map_info *map, unsigned long addr, map_word expected)
  363. {
  364. map_word oldd, curd;
  365. oldd = map_read(map, addr);
  366. curd = map_read(map, addr);
  367. return map_word_equal(map, oldd, curd) &&
  368. map_word_equal(map, curd, expected);
  369. }
/*
 * get_chip - acquire a chip for an operation of the given mode.
 * @map:  the memory map
 * @chip: the chip to acquire (its mutex must already be held on entry)
 * @adr:  address within the chip that the caller wants to access
 * @mode: intended operation (FL_READY, FL_WRITING, FL_POINT, ...)
 *
 * Returns 0 with the chip usable and chip->mutex still held, or -EIO on
 * timeout.  May drop and re-take chip->mutex while waiting, and may
 * suspend an in-progress erase (recorded in chip->oldstate for put_chip()
 * to resume).
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		/* Busy-wait (with the lock dropped) until the chip stops
		 * toggling, then fall through to the ready cases below. */
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				cfi_spin_unlock(chip->mutex);
				return -EIO;
			}
			cfi_spin_unlock(chip->mutex);
			cfi_udelay(1);
			cfi_spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* fall through */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		/* Only suspend the erase for reads, or for writes when the
		 * PRI table says erase-suspend supports them (cfip == NULL
		 * means no PRI table; assume suspend is allowed). */
		if (!(mode == FL_READY || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we trying to
				 * use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			cfi_spin_unlock(chip->mutex);
			cfi_udelay(1);
			cfi_spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fall through to sleep */

	default:
	sleep:
		/* Chip is busy with something we cannot interrupt: sleep on
		 * its wait queue and start over with a fresh timeout. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		cfi_spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		cfi_spin_lock(chip->mutex);
		goto resettime;
	}
}
/*
 * put_chip - release a chip acquired with get_chip().
 *
 * Called with chip->mutex held.  If get_chip() suspended an erase
 * (oldstate == FL_ERASING), issue the Erase-Resume command (0x30) and
 * restore the erasing state; otherwise just drop Vpp.  Finally wake any
 * sleepers waiting for the chip.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	/* NOTE(review): 'cfi' looks unused, but the CMD() macro below
	 * conventionally expands to cfi_build_cmd(x, map, cfi) — confirm
	 * against <linux/mtd/cfi.h> before removing. */
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;

	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
/*
 * do_read_onechip - read @len bytes at chip-relative offset @adr from one
 * chip into @buf.  Acquires the chip via get_chip(), forces it back to
 * read-array mode (0xF0 reset) if needed, and releases it afterwards.
 * Returns 0 on success or the error from get_chip().
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	/* NOTE(review): 'cfi' appears unused but is referenced through the
	 * CMD() macro below — verify against <linux/mtd/cfi.h>. */
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	/* Not already in a readable state: issue the reset/read-array
	 * command before copying data out. */
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	cfi_spin_unlock(chip->mutex);
	return 0;
}
  498. static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
  499. {
  500. struct map_info *map = mtd->priv;
  501. struct cfi_private *cfi = map->fldrv_priv;
  502. unsigned long ofs;
  503. int chipnum;
  504. int ret = 0;
  505. /* ofs: offset within the first chip that the first read should start */
  506. chipnum = (from >> cfi->chipshift);
  507. ofs = from - (chipnum << cfi->chipshift);
  508. *retlen = 0;
  509. while (len) {
  510. unsigned long thislen;
  511. if (chipnum >= cfi->numchips)
  512. break;
  513. if ((len + ofs -1) >> cfi->chipshift)
  514. thislen = (1<<cfi->chipshift) - ofs;
  515. else
  516. thislen = len;
  517. ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
  518. if (ret)
  519. break;
  520. *retlen += thislen;
  521. len -= thislen;
  522. buf += thislen;
  523. ofs = 0;
  524. chipnum++;
  525. }
  526. return ret;
  527. }
/*
 * do_read_secsi_onechip - read @len bytes from one chip's SecSi (secured
 * silicon / OTP) sector at chip-relative offset @adr into @buf.
 *
 * Waits (sleeping on the chip's wait queue) until the chip is FL_READY,
 * then issues the unlock + 0x88 command sequence to enter the SecSi
 * region, copies the data, and issues unlock + 0x90/0x00 to leave it
 * (per the AMD command set — confirm against the device datasheet).
 * Always returns 0.
 */
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	/* NOTE(review): timeo is assigned but never checked in this
	 * function — the wait below has no timeout. */
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
	    printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	/* Unlock sequence followed by 0x88: enter the SecSi sector. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	/* Unlock sequence followed by 0x90/0x00: exit the SecSi sector. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return 0;
}
  565. static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
  566. {
  567. struct map_info *map = mtd->priv;
  568. struct cfi_private *cfi = map->fldrv_priv;
  569. unsigned long ofs;
  570. int chipnum;
  571. int ret = 0;
  572. /* ofs: offset within the first chip that the first read should start */
  573. /* 8 secsi bytes per chip */
  574. chipnum=from>>3;
  575. ofs=from & 7;
  576. *retlen = 0;
  577. while (len) {
  578. unsigned long thislen;
  579. if (chipnum >= cfi->numchips)
  580. break;
  581. if ((len + ofs -1) >> 3)
  582. thislen = (1<<3) - ofs;
  583. else
  584. thislen = len;
  585. ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
  586. if (ret)
  587. break;
  588. *retlen += thislen;
  589. len -= thislen;
  590. buf += thislen;
  591. ofs = 0;
  592. chipnum++;
  593. }
  594. return ret;
  595. }
/*
 * do_write_oneword - program one bus word @datum at chip-relative address
 * @adr on one chip.
 *
 * Acquires the chip, skips the program entirely if the flash already
 * holds @datum (NOP optimization, also a workaround for chips that
 * corrupt other locations when 0xff is rewritten), then issues the AMD
 * unlock + 0xA0 program sequence and polls for completion.  On a bad
 * result the chip is reset (0xF0) and the program retried up to
 * MAX_WORD_RETRIES times before returning -EIO.
 */
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	/* 'cfi' is also referenced through the CMD() macro below. */
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead. Unfortunately, struct flchip does have a field for
	 * maximum timeout, only for typical which can be far too short
	 * depending of the conditions. The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	ENABLE_VPP(map);
 retry:
	/* AMD standard word-program sequence: AA/55 unlock, A0, data. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	/* Give the chip its typical program time before polling. */
	cfi_spin_unlock(chip->mutex);
	cfi_udelay(chip->word_write_time);
	cfi_spin_lock(chip->mutex);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			cfi_spin_lock(chip->mutex);
			continue;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);
		cfi_udelay(1);
		cfi_spin_lock(chip->mutex);
	}

	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);

	return ret;
}
/*
 * Write @len bytes from @buf to flash at device offset @to, one bus word at
 * a time via do_write_oneword().  Unaligned head and tail bytes are handled
 * with read-modify-write cycles.  Returns 0 on success or a negative error
 * code; *retlen accumulates the number of bytes actually written.
 */
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	/* Locate the chip the write starts in, and the offset within it. */
	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;	/* byte position inside the bus word */
		int n = 0;
		map_word tmp_buf;

 retry:
		cfi_spin_lock(cfi->chips[chipnum].mutex);

		/* Chip busy: sleep on its wait queue, then re-check. */
		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			cfi_spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		cfi_spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		/* Merge the new bytes over the old flash contents. */
		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		/* Crossed a chip boundary: advance to the next chip. */
		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		cfi_spin_lock(cfi->chips[chipnum].mutex);

		/* Same busy-wait dance as for the unaligned head. */
		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			cfi_spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		/* Read-modify-write for the final partial word. */
		tmp_buf = map_read(map, ofs + chipstart);

		cfi_spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
/*
 * Program one buffer-load (@len bytes, a whole number of bus words) at chip
 * offset @adr using the AMD "Write to Buffer" command sequence: unlock
 * cycles, 0x25, word count, data words, 0x29 confirm.  Returns 0 on
 * success, -EIO on software timeout, or the error from get_chip().
 */
static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	/* Only used for the debug trace below. */
	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	ENABLE_VPP(map);
	/* Unlock cycles preceding the buffer-load command. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	/* Point 'adr' at the last word written; that is where we poll. */
	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	/* Give the program operation its typical time before polling. */
	cfi_spin_unlock(chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	cfi_spin_lock(chip->mutex);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			cfi_spin_lock(chip->mutex);
			continue;
		}

		if (chip_ready(map, adr))
			goto op_done;

		if( time_after(jiffies, timeo))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);
		cfi_udelay(1);
		cfi_spin_lock(chip->mutex);
	}

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	/* FIXME - should have reset delay before continuing */

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);

	return ret;
}
  881. static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
  882. size_t *retlen, const u_char *buf)
  883. {
  884. struct map_info *map = mtd->priv;
  885. struct cfi_private *cfi = map->fldrv_priv;
  886. int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
  887. int ret = 0;
  888. int chipnum;
  889. unsigned long ofs;
  890. *retlen = 0;
  891. if (!len)
  892. return 0;
  893. chipnum = to >> cfi->chipshift;
  894. ofs = to - (chipnum << cfi->chipshift);
  895. /* If it's not bus-aligned, do the first word write */
  896. if (ofs & (map_bankwidth(map)-1)) {
  897. size_t local_len = (-ofs)&(map_bankwidth(map)-1);
  898. if (local_len > len)
  899. local_len = len;
  900. ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
  901. local_len, retlen, buf);
  902. if (ret)
  903. return ret;
  904. ofs += local_len;
  905. buf += local_len;
  906. len -= local_len;
  907. if (ofs >> cfi->chipshift) {
  908. chipnum ++;
  909. ofs = 0;
  910. if (chipnum == cfi->numchips)
  911. return 0;
  912. }
  913. }
  914. /* Write buffer is worth it only if more than one word to write... */
  915. while (len >= map_bankwidth(map) * 2) {
  916. /* We must not cross write block boundaries */
  917. int size = wbufsize - (ofs & (wbufsize-1));
  918. if (size > len)
  919. size = len;
  920. if (size % map_bankwidth(map))
  921. size -= size % map_bankwidth(map);
  922. ret = do_write_buffer(map, &cfi->chips[chipnum],
  923. ofs, buf, size);
  924. if (ret)
  925. return ret;
  926. ofs += size;
  927. buf += size;
  928. (*retlen) += size;
  929. len -= size;
  930. if (ofs >> cfi->chipshift) {
  931. chipnum ++;
  932. ofs = 0;
  933. if (chipnum == cfi->numchips)
  934. return 0;
  935. }
  936. }
  937. if (len) {
  938. size_t retlen_dregs = 0;
  939. ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
  940. len, &retlen_dregs, buf);
  941. *retlen += retlen_dregs;
  942. return ret;
  943. }
  944. return 0;
  945. }
/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
/*
 * Issue the AMD chip-erase sequence (unlock, 0x80, unlock, 0x10) and poll
 * until the flash reads back all-ones or a 20 second software timeout
 * expires.  Supports erase-suspend via chip->erase_suspended.  Returns 0
 * on success, -EIO on failure, or the error from get_chip().
 */
static inline int do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	/* Poll at the first unlock address. */
	adr = cfi->addr_unlock1;

	cfi_spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		cfi_spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	ENABLE_VPP(map);
	/* Two unlock cycles, erase setup, two more unlocks, chip erase. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	/* Sleep for roughly half the typical erase time before polling. */
	cfi_spin_unlock(chip->mutex);
	msleep(chip->erase_time/2);
	cfi_spin_lock(chip->mutex);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			cfi_spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			cfi_spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
		cfi_spin_lock(chip->mutex);
	}

	/* Did we succeed?  Erased flash must read back all-ones. */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	cfi_spin_unlock(chip->mutex);

	return ret;
}
  1022. static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
  1023. {
  1024. struct cfi_private *cfi = map->fldrv_priv;
  1025. unsigned long timeo = jiffies + HZ;
  1026. DECLARE_WAITQUEUE(wait, current);
  1027. int ret = 0;
  1028. adr += chip->start;
  1029. cfi_spin_lock(chip->mutex);
  1030. ret = get_chip(map, chip, adr, FL_ERASING);
  1031. if (ret) {
  1032. cfi_spin_unlock(chip->mutex);
  1033. return ret;
  1034. }
  1035. DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
  1036. __func__, adr );
  1037. ENABLE_VPP(map);
  1038. cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
  1039. cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
  1040. cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
  1041. cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
  1042. cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
  1043. map_write(map, CMD(0x30), adr);
  1044. chip->state = FL_ERASING;
  1045. chip->erase_suspended = 0;
  1046. chip->in_progress_block_addr = adr;
  1047. cfi_spin_unlock(chip->mutex);
  1048. msleep(chip->erase_time/2);
  1049. cfi_spin_lock(chip->mutex);
  1050. timeo = jiffies + (HZ*20);
  1051. for (;;) {
  1052. if (chip->state != FL_ERASING) {
  1053. /* Someone's suspended the erase. Sleep */
  1054. set_current_state(TASK_UNINTERRUPTIBLE);
  1055. add_wait_queue(&chip->wq, &wait);
  1056. cfi_spin_unlock(chip->mutex);
  1057. schedule();
  1058. remove_wait_queue(&chip->wq, &wait);
  1059. cfi_spin_lock(chip->mutex);
  1060. continue;
  1061. }
  1062. if (chip->erase_suspended) {
  1063. /* This erase was suspended and resumed.
  1064. Adjust the timeout */
  1065. timeo = jiffies + (HZ*20); /* FIXME */
  1066. chip->erase_suspended = 0;
  1067. }
  1068. if (chip_ready(map, adr))
  1069. break;
  1070. if (time_after(jiffies, timeo)) {
  1071. printk(KERN_WARNING "MTD %s(): software timeout\n",
  1072. __func__ );
  1073. break;
  1074. }
  1075. /* Latency issues. Drop the lock, wait a while and retry */
  1076. cfi_spin_unlock(chip->mutex);
  1077. set_current_state(TASK_UNINTERRUPTIBLE);
  1078. schedule_timeout(1);
  1079. cfi_spin_lock(chip->mutex);
  1080. }
  1081. /* Did we succeed? */
  1082. if (chip_good(map, adr, map_word_ff(map))) {
  1083. /* reset on all failures. */
  1084. map_write( map, CMD(0xF0), chip->start );
  1085. /* FIXME - should have reset delay before continuing */
  1086. ret = -EIO;
  1087. }
  1088. chip->state = FL_READY;
  1089. put_chip(map, chip, adr);
  1090. cfi_spin_unlock(chip->mutex);
  1091. return ret;
  1092. }
  1093. int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
  1094. {
  1095. unsigned long ofs, len;
  1096. int ret;
  1097. ofs = instr->addr;
  1098. len = instr->len;
  1099. ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
  1100. if (ret)
  1101. return ret;
  1102. instr->state = MTD_ERASE_DONE;
  1103. mtd_erase_callback(instr);
  1104. return 0;
  1105. }
  1106. static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
  1107. {
  1108. struct map_info *map = mtd->priv;
  1109. struct cfi_private *cfi = map->fldrv_priv;
  1110. int ret = 0;
  1111. if (instr->addr != 0)
  1112. return -EINVAL;
  1113. if (instr->len != mtd->size)
  1114. return -EINVAL;
  1115. ret = do_erase_chip(map, &cfi->chips[0]);
  1116. if (ret)
  1117. return ret;
  1118. instr->state = MTD_ERASE_DONE;
  1119. mtd_erase_callback(instr);
  1120. return 0;
  1121. }
/*
 * Wait for every chip to go idle and park it in FL_SYNCING so no new
 * operations start, then restore each chip's previous state and wake any
 * waiters.  Used for mtd->sync.
 */
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		cfi_spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through - FL_SYNCING only needs the unlock */
		case FL_SYNCING:
			cfi_spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			cfi_spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		cfi_spin_unlock(chip->mutex);
	}
}
/*
 * Power-management suspend: mark every idle chip FL_PM_SUSPENDED.  If any
 * chip is busy the attempt fails with -EAGAIN, and chips suspended so far
 * are rolled back to their previous state and their waiters woken.
 */
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through - already suspended is fine */
		case FL_PM_SUSPENDED:
			break;

		default:
			/* Chip is busy: abort the whole suspend attempt. */
			ret = -EAGAIN;
			break;
		}
		cfi_spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			cfi_spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			cfi_spin_unlock(chip->mutex);
		}
	}

	return ret;
}
  1211. static void cfi_amdstd_resume(struct mtd_info *mtd)
  1212. {
  1213. struct map_info *map = mtd->priv;
  1214. struct cfi_private *cfi = map->fldrv_priv;
  1215. int i;
  1216. struct flchip *chip;
  1217. for (i=0; i<cfi->numchips; i++) {
  1218. chip = &cfi->chips[i];
  1219. cfi_spin_lock(chip->mutex);
  1220. if (chip->state == FL_PM_SUSPENDED) {
  1221. chip->state = FL_READY;
  1222. map_write(map, CMD(0xF0), chip->start);
  1223. wake_up(&chip->wq);
  1224. }
  1225. else
  1226. printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
  1227. cfi_spin_unlock(chip->mutex);
  1228. }
  1229. }
  1230. static void cfi_amdstd_destroy(struct mtd_info *mtd)
  1231. {
  1232. struct map_info *map = mtd->priv;
  1233. struct cfi_private *cfi = map->fldrv_priv;
  1234. kfree(cfi->cmdset_priv);
  1235. kfree(cfi->cfiq);
  1236. kfree(cfi);
  1237. kfree(mtd->eraseregions);
  1238. }
/* Key under which the command-set constructor is published. */
static char im_name[]="cfi_cmdset_0002";

/* Module init: export cfi_cmdset_0002() to the CFI probe core. */
static int __init cfi_amdstd_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
	return 0;
}

/* Module exit: withdraw the exported constructor. */
static void __exit cfi_amdstd_exit(void)
{
	inter_module_unregister(im_name);
}

module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");