/*
 * Common Flash Interface support:
 * ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
 *	- Plugged memory leak in cfi_staa_writev().
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
//#define DEBUG_CFI_FEATURES

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}
	/* VccOptimal/VppOptimal are single BCD bytes: volts in the high
	   nibble, tenths in the low nibble, so shift by 4 (not 8). */
	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR " Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
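
/*
 * Editor's note on the geometry arithmetic below (an illustration, not
 * part of the original source): per the CFI spec, each EraseRegionInfo
 * word encodes bits 15..0 as (number of erase blocks - 1) and bits
 * 31..16 as (block size / 256 bytes). So ((info >> 8) & ~0xff) equals
 * (info >> 16) * 256, i.e. the block size in bytes, which is then
 * scaled by the interleave. For example, info == 0x0100003f would
 * describe 0x3f + 1 = 64 blocks of 0x0100 * 256 = 64 KiB each.
 */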
static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Also select the correct geometry setup too */
	mtd->erase = cfi_staa_erase_varsize;
	mtd->read = cfi_staa_read;
	mtd->write = cfi_staa_write_buffers;
	mtd->writev = cfi_staa_writev;
	mtd->sync = cfi_staa_sync;
	mtd->lock = cfi_staa_lock;
	mtd->unlock = cfi_staa_unlock;
	mtd->suspend = cfi_staa_suspend;
	mtd->resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}
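
/*
 * Reads have to negotiate with the per-chip state machine: an erase in
 * progress is suspended (0xB0), the array is read, and the erase is
 * resumed (0xD0) on the way out. Throughout this driver, status_OK is
 * built as CMD(0x80) on the assumption that status register bit 7
 * ("WSM ready") must be set in every interleaved device before the
 * chip may be talked to.
 */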
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write (map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				spin_unlock_bh(chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock_bh(chip->mutex);
			cfi_udelay(1);
			spin_lock_bh(chip->mutex);
		}

		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		/* Fall through: poll the status register we just selected */

	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}
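
/*
 * Illustration of the chip-spanning arithmetic used below and in the
 * write/erase paths (editor's example, with assumed values): with
 * cfi->chipshift == 22 (4 MiB chips), a request at from == 0x401000
 * gives chipnum == 0x401000 >> 22 == 1 and ofs == 0x401000 - (1 << 22)
 * == 0x1000, i.e. 4 KiB into the second chip; the loop then walks any
 * remainder into subsequent chips with ofs reset to 0.
 */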
static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
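
/*
 * A buffered write follows the ST/Intel command sequence: issue 0xE8
 * (Write to Buffer) until the status register reports ready, write the
 * word count (len/map_bankwidth(map) - 1), stream in the data, then
 * confirm with 0xD0 and poll the status register for completion. The
 * error decode at the end checks the SR bits covered by the 0x3a mask.
 */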
static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%lx]\n", __func__, map_read(map, cmd_adr).x[0]);
#endif
		/* Fall through: poll the status register we just selected */

	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length of data to come */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock_bh(chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		z++;
		spin_lock_bh(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		spin_unlock_bh(chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}
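
/*
 * Writes are split so that no single call to do_write_buffer() crosses
 * a write-buffer boundary. Editor's example, assuming wbufsize == 32:
 * a write of 100 bytes starting at ofs == 0x1c is issued as chunks of
 * 4, 32, 32 and 32 bytes, the first one merely rounding ofs up to the
 * next 32-byte boundary.
 */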
static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
				   size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%zx]\n", __func__, ofs, len);
#endif

	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 * (see the worked example below).
 */
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
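
/*
 * Worked example (editor's illustration, assuming mtd->writesize == 8,
 * the value set in cfi_staa_setup): for a vector element of 27 bytes,
 * ECCBUF_DIV(27) == 27 & ~7 == 24 bytes that can go straight to the
 * chip, and ECCBUF_MOD(27) == 27 & 7 == 3 tail bytes that are parked
 * in the bounce buffer until the next element (or the final flush)
 * completes the page. The bitmasks only work because 8 is a power of
 * two.
 */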
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t	 totlen = 0, thislen;
	int	 ret = 0;
	size_t	 buflen = 0;
	char	*buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			/* the tail starts right after the aligned portion */
			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd->write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != buflen)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}
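
/*
 * Block erase: clear the status register (0x50), then issue the
 * 0x20/0xD0 erase sequence and poll until the ready bit comes back.
 * If the SR reports an error (0x3a mask), the bits are decoded below:
 * SR.1 = block locked, SR.3 = VPP low, SR.5 = erase failure, and
 * SR.4+SR.5 together = improper command sequence; a plain erase
 * failure is retried up to three times.
 */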
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* Fall through: poll the status register we just selected */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				spin_unlock_bh(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return ret;
}
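
/*
 * Variable-size erase: the request must start and end on block
 * boundaries of whichever erase region each address falls in. Editor's
 * example, with an assumed geometry of eight 8 KiB boot blocks followed
 * by 64 KiB main blocks: an erase at addr 0x2000, len 0x4000 is
 * accepted (8 KiB-aligned inside the boot region), while a request
 * crossing into the main region must also end 64 KiB-aligned there.
 */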
static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */

		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock_bh(chip->mutex);
	}
}
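
/*
 * Locking: each block is locked with the 0x60 (Set/Clear Lock Bits)
 * setup command followed by 0x01 (Set Lock Bit), then the status
 * register is polled until ready. The unconditional msleep(1000)
 * mirrors the erase path; the FIXME below already suggests replacing
 * it with a timer.
 */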
static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* Fall through: poll the status register we just selected */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize -1))
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while(len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}
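
/*
 * Unlocking uses 0x60 (Set/Clear Lock Bits) followed by 0xD0 (Clear
 * Lock Bits). Note that cfi_staa_unlock() below issues a single
 * do_unlock_oneblock() call for the whole range rather than looping
 * block by block; this appears to rely on these parts clearing lock
 * bits chip-wide in one operation (an observation about the code, not
 * a documented guarantee).
 */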
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* Fall through: poll the status register we just selected */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}
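
/*
 * Power management: suspend only succeeds if every chip is idle
 * (FL_READY/FL_STATUS/query states); anything else returns -EAGAIN and
 * rolls the already-suspended chips back. Resume issues 0xFF (Read
 * Array) because the chip may have been power cycled and its state
 * machine can no longer be trusted.
 */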
static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */

		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock_bh(chip->mutex);
	}

	/* Unlock the chips again */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock_bh(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock_bh(chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock_bh(chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

MODULE_LICENSE("GPL");