/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.164 2004/11/16 18:29:00 dwmw2 Exp $
 *
 *
 * 10/10/2000 Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB                0x00ad
#define I82802AC                0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080               0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                               size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                                  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

#include "fwh_lock.h"

/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
        printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        for (i = 10; i < 32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk(" - Unknown Bit %X: supported\n", i);
        }

        printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i = 1; i < 8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk(" - Unknown Bit %X: supported\n", i);
        }

        printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i = 2; i < 16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk(" - Unknown Bit %X Active: yes\n", i);
        }

        printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
               "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * common as well.  This table picks the cases where we
         * know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);
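
        /*
         * The version 1.3 extended query table has variable-length tails,
         * so this is effectively a two-pass read: parse with an initial
         * guess of sizeof(*extp), and if the computed real size turns out
         * bigger, free the buffer and re-read the whole table at the
         * enlarged size (see the need_more path below).
         */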
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) * (4 + 6);

                /* Burst Read info */
                extra_size += 6;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                       "%s: cfi_pri_intelext is too fat\n",
                                       __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure.  So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);
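
        /* The CFI timeout fields hold log2 of the typical time in
           microseconds, so 1 << value converts each to a usec delay. */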
        for (i = 0; i < cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1 << cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1 << cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1 << cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i, j;
        unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
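
        /* DevSize is log2 of a single chip's size in bytes; multiplying by
           the interleave gives the size of one interleaved set of chips. */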
        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                                    * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
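                /* Each EraseRegionInfo word encodes: bits 0-15 = (number
                   of blocks - 1), bits 16-31 = block size in units of 256
                   bytes.  (info >> 8) & ~0xff therefore recovers the block
                   size in bytes for a single chip. */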
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j = 0; j < cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i = 0; i < mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i, mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#if 0
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        return mtd;

 setup_err:
        if (mtd) {
                if (mtd->eraseregions)
                        kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point.  This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) * (4 + 6);

                /* Burst Read info */
                offs += 6;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
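
                /* chipshift is log2 of the chip size, so partshift is log2
                   of the partition size (this assumes numparts is a power
                   of two).  E.g. a 16 MiB chip (chipshift = 24) split into
                   4 partitions gives partshift = 22, i.e. 4 MiB each. */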
                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                               "%s: bad number of hw partitions (%d)\n",
                               __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                       "--> %d partitions of %d KiB\n",
                       map->name, cfi->numchips, cfi->interleave,
                       newcfi->numchips, 1 << (newcfi->chipshift - 10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
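        /* SR.7 (0x80) is the WSM "ready" bit; SR.0 (0x01) on multi-
           partition parts apparently flags that the busy state belongs to
           another partition.  CMD() replicates the bit pattern across all
           the interleaved devices on the bus. */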
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read when its lock is taken.
                 * However any writes to it can only be made when the current
                 * owner's lock is also held.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                if (contender && contender != chip)
                        spin_unlock(contender->mutex);
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
                                       status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
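                /* Fall through: the chip reported ready */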
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                      (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);

                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here. */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport & 1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't?  The old code would leave the finished
                   one in READY mode.  That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;

        default:
                printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        preempt_disable();
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
        local_irq_enable();
        preempt_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate, newstate;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
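
                        /* SR.6 (0x40) set means the erase really suspended;
                           SR.2 (0x04) means the program suspended.  If the
                           relevant bit is clear the operation had already
                           completed before the suspend took effect, so
                           there is nothing to resume and we stop waiting. */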
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        preempt_enable();
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        preempt_disable();
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                preempt_enable();
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                preempt_disable();
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
        do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() are always entered with chip->state
 * set to FL_READY (or FL_XIP_WHILE_*), i.e. with the flash in array mode,
 * so most of the cases therein are never executed and XIP is undisturbed.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)
#define XIP_INVAL_CACHED_RANGE(x...)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;
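
                /* If the request crosses a chip boundary, clamp this pass
                   to the end of the current chip; the loop then continues
                   on the next chip with ofs reset to 0. */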
                if ((len + ofs - 1) >> cfi->chipshift)
                        thislen = (1 << cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) from POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;

                chip = &cfi->chips[chipnum];

                if ((len + ofs - 1) >> cfi->chipshift)
                        thislen = (1 << cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);
                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs - 1) >> cfi->chipshift)
                        thislen = (1 << cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

#if 0
static int __xipram cfi_intelext_read_prot_reg (struct mtd_info *mtd,
                                                loff_t from, size_t len,
                                                size_t *retlen,
                                                u_char *buf,
                                                int base_offst, int reg_sz)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct flchip *chip;
        int ofs_factor = cfi->interleave * cfi->device_type;
        int count = len;
        int chip_num, offst;
        int ret;

        chip_num = ((unsigned int)from/reg_sz);
        offst = from - (reg_sz*chip_num) + base_offst;

        while (count) {
                /* Calculate which chip & protection register offset we need */

                if (chip_num >= cfi->numchips)
                        goto out;

                chip = &cfi->chips[chip_num];

                spin_lock(chip->mutex);
                ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
                if (ret) {
                        spin_unlock(chip->mutex);
                        return (len-count) ? : ret;
                }

                xip_disable(map, chip, chip->start);

                if (chip->state != FL_JEDEC_QUERY) {
                        map_write(map, CMD(0x90), chip->start);
                        chip->state = FL_JEDEC_QUERY;
                }

                while (count && ((offst-base_offst) < reg_sz)) {
                        *buf = map_read8(map, (chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
                        buf++;
                        offst++;
                        count--;
                }

                xip_enable(map, chip, chip->start);
                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                /* Move on to the next chip */
                chip_num++;
                offst = base_offst;
        }

 out:
        return len-count;
}

static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        int base_offst, reg_sz;

        /* Check that we actually have some protection registers */
        if (!extp || !(extp->FeatureSupport&64)) {
                printk(KERN_WARNING "%s: This flash device has no protection data to read!\n", map->name);
                return 0;
        }

        base_offst = (1<<extp->FactProtRegSize);
        reg_sz = (1<<extp->UserProtRegSize);

        return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        int base_offst, reg_sz;

        /* Check that we actually have some protection registers */
        if (!extp || !(extp->FeatureSupport&64)) {
                printk(KERN_WARNING "%s: This flash device has no protection data to read!\n", map->name);
                return 0;
        }

        base_offst = 0;
        reg_sz = (1<<extp->FactProtRegSize);

        return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
#endif

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
                                     unsigned long adr, map_word datum)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long timeo;
        int z, ret = 0;

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
        map_write(map, CMD(0x40), adr);
        map_write(map, datum, adr);
        chip->state = FL_WRITING;

        spin_unlock(chip->mutex);
        INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
        UDELAY(map, chip, adr, chip->word_write_time);
        spin_lock(chip->mutex);

        timeo = jiffies + (HZ/2);
        z = 0;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        chip->state = FL_STATUS;
                        xip_enable(map, chip, adr);
                        printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                z++;
                UDELAY(map, chip, adr, 1);
                spin_lock(chip->mutex);
        }
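
        /* Crude adaptive timing: if the chip was ready right after the
           initial delay, shorten the typical delay for next time (never
           below 1); if we had to poll more than once, lengthen it. */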
        if (!z) {
                chip->word_write_time--;
                if (!chip->word_write_time)
                        chip->word_write_time++;
        }
        if (z > 1)
                chip->word_write_time++;

        /* Done and happy. */
        chip->state = FL_STATUS;

        /* check for lock bit */
        if (map_word_bitsset(map, status, CMD(0x02))) {
                /* clear status */
                map_write(map, CMD(0x50), adr);
                /* put back into read status register mode */
                map_write(map, CMD(0x70), adr);
                ret = -EROFS;
        }

        xip_enable(map, chip, adr);
 out:   put_chip(map, chip, adr);
        spin_unlock(chip->mutex);
        return ret;
}

static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (map_bankwidth(map)-1)) {
                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
                int gap = ofs - bus_ofs;
                int n;
                map_word datum;

                n = min_t(int, len, map_bankwidth(map)-gap);
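
                /* Pad with 0xff: programming a NOR cell can only clear
                   bits, so 0xff filler leaves the neighbouring data
                   untouched while the caller's bytes are merged in. */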
                datum = map_word_ff(map);
                datum = map_word_load_partial(map, datum, buf, gap, n);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       bus_ofs, datum);
                if (ret)
                        return ret;

                len -= n;
                ofs += n;
                buf += n;
                (*retlen) += n;

                if (ofs >> cfi->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        while (len >= map_bankwidth(map)) {
                map_word datum = map_word_load(map, buf);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum);
                if (ret)
                        return ret;

                ofs += map_bankwidth(map);
                buf += map_bankwidth(map);
                (*retlen) += map_bankwidth(map);
                len -= map_bankwidth(map);

                if (ofs >> cfi->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        if (len & (map_bankwidth(map)-1)) {
                map_word datum;

                datum = map_word_ff(map);
                datum = map_word_load_partial(map, datum, buf, 0, len);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum);
                if (ret)
                        return ret;

                (*retlen) += len;
        }

        return 0;
}

static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                                    unsigned long adr, const u_char *buf, int len)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long cmd_adr, timeo;
        int wbufsize, z, ret = 0, bytes, words;

        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
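
        /* MaxBufWriteSize is log2 of the per-chip write buffer size in
           bytes; scaling by the interleave gives the buffer span as seen
           on the bus, and cmd_adr below aligns the write to it. */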
  1245. adr += chip->start;
  1246. cmd_adr = adr & ~(wbufsize-1);
  1247. /* Let's determine this according to the interleave only once */
  1248. status_OK = CMD(0x80);
  1249. spin_lock(chip->mutex);
  1250. ret = get_chip(map, chip, cmd_adr, FL_WRITING);
  1251. if (ret) {
  1252. spin_unlock(chip->mutex);
  1253. return ret;
  1254. }
  1255. XIP_INVAL_CACHED_RANGE(map, adr, len);
  1256. ENABLE_VPP(map);
  1257. xip_disable(map, chip, cmd_adr);
  1258. /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
  1259. [...], the device will not accept any more Write to Buffer commands".
  1260. So we must check here and reset those bits if they're set. Otherwise
  1261. we're just pissing in the wind */
  1262. if (chip->state != FL_STATUS)
  1263. map_write(map, CMD(0x70), cmd_adr);
  1264. status = map_read(map, cmd_adr);
  1265. if (map_word_bitsset(map, status, CMD(0x30))) {
  1266. xip_enable(map, chip, cmd_adr);
  1267. printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
  1268. xip_disable(map, chip, cmd_adr);
  1269. map_write(map, CMD(0x50), cmd_adr);
  1270. map_write(map, CMD(0x70), cmd_adr);
  1271. }
  1272. chip->state = FL_WRITING_TO_BUFFER;
  1273. z = 0;
  1274. for (;;) {
  1275. map_write(map, CMD(0xe8), cmd_adr);
  1276. status = map_read(map, cmd_adr);
  1277. if (map_word_andequal(map, status, status_OK, status_OK))
  1278. break;
  1279. spin_unlock(chip->mutex);
  1280. UDELAY(map, chip, cmd_adr, 1);
  1281. spin_lock(chip->mutex);
  1282. if (++z > 20) {
  1283. /* Argh. Not ready for write to buffer */
  1284. map_word Xstatus;
  1285. map_write(map, CMD(0x70), cmd_adr);
  1286. chip->state = FL_STATUS;
  1287. Xstatus = map_read(map, cmd_adr);
  1288. /* Odd. Clear status bits */
  1289. map_write(map, CMD(0x50), cmd_adr);
  1290. map_write(map, CMD(0x70), cmd_adr);
  1291. xip_enable(map, chip, cmd_adr);
  1292. printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
  1293. status.x[0], Xstatus.x[0]);
  1294. ret = -EIO;
  1295. goto out;
  1296. }
  1297. }
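
	/*
	 * The count written next is the number of bus-width writes minus one:
	 * with a trailing partial word there are 'words + 1' data writes, so
	 * the 'words - !bytes' encoding below works out to 'words' in that
	 * case and to 'words - 1' when the length is an exact multiple of
	 * the bus width.
	 */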
	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - !bytes), cmd_adr);

	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);
		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		z++;
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
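
/*
 * A hypothetical sizing example: with two x16 chips interleaved on a 32-bit
 * bus and a CFI MaxBufWriteSize of 5 (a 2^5 = 32-byte buffer per chip),
 * wbufsize = 2 << 5 = 64 bytes.  cfi_intelext_write_buffers() below would
 * then chunk a 100-byte write starting 40 bytes into a buffer region as
 * 24 + 64 + 12 bytes, never crossing a write-buffer boundary.
 */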
static int cfi_intelext_write_buffers(struct mtd_info *mtd, loff_t to,
				      size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}
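
/*
 * Block erase is the two-cycle 0x20 (erase setup) / 0xD0 (erase confirm)
 * sequence.  Erase is by far the slowest operation on these parts
 * (typically around a second per block on StrataFlash), hence the generous
 * HZ*20 timeout and the suspend/resume handling in the wait loop below.
 */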
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, adr, chip->erase_time*1000/2);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
			       adr, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1000000/HZ);
		spin_lock(chip->mutex);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);
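
	/*
	 * Error-bit decode: the 0x3a mask below covers SR.5 (erase error,
	 * 0x20), SR.4 (program/set-lock-bit error, 0x10), SR.3 (VPP low,
	 * 0x08) and SR.1 (block locked, 0x02).  Per the datasheet, SR.4 and
	 * SR.5 set together indicate a bad command sequence rather than a
	 * real erase failure, which is why that case is tested first.
	 */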
	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus;

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				put_chip(map, chip, adr);
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	} else {
		xip_enable(map, chip, adr);
		ret = 0;
	}

 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static void cfi_intelext_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	xip_disable(map, chip, adr+(2*ofs_factor));
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
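
/*
 * Block locking uses the 0x60 (lock setup) command followed by a confirm:
 * 0x01 sets the block's lock bit, 0xD0 clears lock bits.  Completion is
 * polled through the status register just like program and erase, which is
 * why do_xxlock_oneblock() below shares the same SR.7 wait loop.
 */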
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	spin_unlock(chip->mutex);
	UDELAY(map, chip, adr, 1000000/HZ);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, 0);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, 0);
#endif

	return ret;
}
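
/*
 * Power management: suspend only succeeds when every chip is idle (read,
 * status or query state); anything with an operation pending or in flight
 * gets -EAGAIN, and any chips already moved to FL_PM_SUSPENDED are rolled
 * back, so the device is left in a consistent state on failure.
 */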
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			/* fall through */
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it
				   didn't get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
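
/*
 * On resume the chip may have been power cycled, so it is forced back to a
 * known state with the 0xFF Read Array command before any waiters are woken.
 */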
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name_1[] = "cfi_cmdset_0001";
static char im_name_3[] = "cfi_cmdset_0003";
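
/*
 * Both the 0x0001 (Intel/Sharp extended) and 0x0003 (Intel standard)
 * command-set IDs are deliberately registered against the same
 * cfi_cmdset_0001 implementation below: the two command sets are close
 * enough that one driver serves both.
 */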
static int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");