/* drivers/mtd/nand/omap2.c — OMAP2/3 GPMC NAND controller driver */
  1. /*
  2. * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
  3. * Copyright © 2004 Micron Technology Inc.
  4. * Copyright © 2004 David Brownell
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #include <linux/platform_device.h>
  11. #include <linux/dmaengine.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/delay.h>
  14. #include <linux/module.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/jiffies.h>
  17. #include <linux/sched.h>
  18. #include <linux/mtd/mtd.h>
  19. #include <linux/mtd/nand.h>
  20. #include <linux/mtd/partitions.h>
  21. #include <linux/omap-dma.h>
  22. #include <linux/io.h>
  23. #include <linux/slab.h>
  24. #ifdef CONFIG_MTD_NAND_OMAP_BCH
  25. #include <linux/bch.h>
  26. #endif
  27. #include <plat/dma.h>
  28. #include <plat/gpmc.h>
  29. #include <linux/platform_data/mtd-nand-omap2.h>
  30. #define DRIVER_NAME "omap2-nand"
  31. #define OMAP_NAND_TIMEOUT_MS 5000
  32. #define NAND_Ecc_P1e (1 << 0)
  33. #define NAND_Ecc_P2e (1 << 1)
  34. #define NAND_Ecc_P4e (1 << 2)
  35. #define NAND_Ecc_P8e (1 << 3)
  36. #define NAND_Ecc_P16e (1 << 4)
  37. #define NAND_Ecc_P32e (1 << 5)
  38. #define NAND_Ecc_P64e (1 << 6)
  39. #define NAND_Ecc_P128e (1 << 7)
  40. #define NAND_Ecc_P256e (1 << 8)
  41. #define NAND_Ecc_P512e (1 << 9)
  42. #define NAND_Ecc_P1024e (1 << 10)
  43. #define NAND_Ecc_P2048e (1 << 11)
  44. #define NAND_Ecc_P1o (1 << 16)
  45. #define NAND_Ecc_P2o (1 << 17)
  46. #define NAND_Ecc_P4o (1 << 18)
  47. #define NAND_Ecc_P8o (1 << 19)
  48. #define NAND_Ecc_P16o (1 << 20)
  49. #define NAND_Ecc_P32o (1 << 21)
  50. #define NAND_Ecc_P64o (1 << 22)
  51. #define NAND_Ecc_P128o (1 << 23)
  52. #define NAND_Ecc_P256o (1 << 24)
  53. #define NAND_Ecc_P512o (1 << 25)
  54. #define NAND_Ecc_P1024o (1 << 26)
  55. #define NAND_Ecc_P2048o (1 << 27)
  56. #define TF(value) (value ? 1 : 0)
  57. #define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
  58. #define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
  59. #define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
  60. #define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
  61. #define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
  62. #define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
  63. #define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
  64. #define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
  65. #define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
  66. #define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
  67. #define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
  68. #define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
  69. #define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
  70. #define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
  71. #define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
  72. #define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
  73. #define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
  74. #define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
  75. #define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
  76. #define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
  77. #define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
  78. #define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
  79. #define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
  80. #define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
  81. #define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
  82. #define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
  83. #define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
  84. #define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
  85. #define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
  86. #define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
  87. #define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
  88. #define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
  89. #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
  90. #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
  91. #define PREFETCH_CONFIG1_CS_SHIFT 24
  92. #define ECC_CONFIG_CS_SHIFT 1
  93. #define CS_MASK 0x7
  94. #define ENABLE_PREFETCH (0x1 << 7)
  95. #define DMA_MPU_MODE_SHIFT 2
  96. #define ECCSIZE1_SHIFT 22
  97. #define ECC1RESULTSIZE 0x1
  98. #define ECCCLEAR 0x100
  99. #define ECC1 0x1
  100. #define OMAP24XX_DMA_GPMC 4
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;

/* Define some generic bad / good block scan pattern which are used
 * while scanning a device for factory marked good / bad blocks
 */
static uint8_t scan_ff_pattern[] = { 0xff };

/* Bad-block table descriptor: a single 0xff byte at offset 0 of the OOB
 * area marks a good block; empty and all pages are scanned. */
static struct nand_bbt_descr bb_descrip_flashbased = {
	.options =	NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
	.offs =		0,
	.len =		1,
	.pattern =	scan_ff_pattern,
};
/* Per-device driver state, embedding the MTD and NAND chip structures */
struct omap_nand_info {
	struct nand_hw_control		controller;
	struct omap_nand_platform_data	*pdata;		/* board platform data */
	struct mtd_info			mtd;
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;	/* GPMC chip-select used by this NAND */
	unsigned long			phys_base;	/* physical base of the NAND region */
	unsigned long			mem_size;
	struct completion		comp;		/* signalled when a DMA/irq transfer ends */
	struct dma_chan			*dma;
	int				gpmc_irq_fifo;	/* FIFO-threshold interrupt */
	int				gpmc_irq_count;	/* transfer-count-done interrupt */
	enum {
		OMAP_NAND_IO_READ = 0,	/* read */
		OMAP_NAND_IO_WRITE,	/* write */
	} iomode;				/* direction of the in-flight irq-mode transfer */
	u_char				*buf;		/* cursor into the caller's buffer (irq mode) */
	int				buf_len;	/* bytes remaining (irq-mode write) */
	struct gpmc_nand_regs		reg;		/* mapped GPMC NAND/prefetch registers */

#ifdef CONFIG_MTD_NAND_OMAP_BCH
	struct bch_control		*bch;		/* software BCH decoder state */
	struct nand_ecclayout		ecclayout;
#endif
};
  138. /**
  139. * omap_prefetch_enable - configures and starts prefetch transfer
  140. * @cs: cs (chip select) number
  141. * @fifo_th: fifo threshold to be used for read/ write
  142. * @dma_mode: dma mode enable (1) or disable (0)
  143. * @u32_count: number of bytes to be transferred
  144. * @is_write: prefetch read(0) or write post(1) mode
  145. */
  146. static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
  147. unsigned int u32_count, int is_write, struct omap_nand_info *info)
  148. {
  149. u32 val;
  150. if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
  151. return -1;
  152. if (readl(info->reg.gpmc_prefetch_control))
  153. return -EBUSY;
  154. /* Set the amount of bytes to be prefetched */
  155. writel(u32_count, info->reg.gpmc_prefetch_config2);
  156. /* Set dma/mpu mode, the prefetch read / post write and
  157. * enable the engine. Set which cs is has requested for.
  158. */
  159. val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
  160. PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
  161. (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
  162. writel(val, info->reg.gpmc_prefetch_config1);
  163. /* Start the prefetch engine */
  164. writel(0x1, info->reg.gpmc_prefetch_control);
  165. return 0;
  166. }
  167. /**
  168. * omap_prefetch_reset - disables and stops the prefetch engine
  169. */
  170. static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
  171. {
  172. u32 config1;
  173. /* check if the same module/cs is trying to reset */
  174. config1 = readl(info->reg.gpmc_prefetch_config1);
  175. if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
  176. return -EINVAL;
  177. /* Stop the PFPW engine */
  178. writel(0x0, info->reg.gpmc_prefetch_control);
  179. /* Reset/disable the PFPW engine */
  180. writel(0x0, info->reg.gpmc_prefetch_config1);
  181. return 0;
  182. }
  183. /**
  184. * omap_hwcontrol - hardware specific access to control-lines
  185. * @mtd: MTD device structure
  186. * @cmd: command to device
  187. * @ctrl:
  188. * NAND_NCE: bit 0 -> don't care
  189. * NAND_CLE: bit 1 -> Command Latch
  190. * NAND_ALE: bit 2 -> Address Latch
  191. *
  192. * NOTE: boards may use different bits for these!!
  193. */
  194. static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
  195. {
  196. struct omap_nand_info *info = container_of(mtd,
  197. struct omap_nand_info, mtd);
  198. if (cmd != NAND_CMD_NONE) {
  199. if (ctrl & NAND_CLE)
  200. writeb(cmd, info->reg.gpmc_nand_command);
  201. else if (ctrl & NAND_ALE)
  202. writeb(cmd, info->reg.gpmc_nand_address);
  203. else /* NAND_NCE */
  204. writeb(cmd, info->reg.gpmc_nand_data);
  205. }
  206. }
  207. /**
  208. * omap_read_buf8 - read data from NAND controller into buffer
  209. * @mtd: MTD device structure
  210. * @buf: buffer to store date
  211. * @len: number of bytes to read
  212. */
  213. static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
  214. {
  215. struct nand_chip *nand = mtd->priv;
  216. ioread8_rep(nand->IO_ADDR_R, buf, len);
  217. }
  218. /**
  219. * omap_write_buf8 - write buffer to NAND controller
  220. * @mtd: MTD device structure
  221. * @buf: data buffer
  222. * @len: number of bytes to write
  223. */
  224. static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
  225. {
  226. struct omap_nand_info *info = container_of(mtd,
  227. struct omap_nand_info, mtd);
  228. u_char *p = (u_char *)buf;
  229. u32 status = 0;
  230. while (len--) {
  231. iowrite8(*p++, info->nand.IO_ADDR_W);
  232. /* wait until buffer is available for write */
  233. do {
  234. status = readl(info->reg.gpmc_status) &
  235. GPMC_STATUS_BUFF_EMPTY;
  236. } while (!status);
  237. }
  238. }
  239. /**
  240. * omap_read_buf16 - read data from NAND controller into buffer
  241. * @mtd: MTD device structure
  242. * @buf: buffer to store date
  243. * @len: number of bytes to read
  244. */
  245. static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
  246. {
  247. struct nand_chip *nand = mtd->priv;
  248. ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
  249. }
  250. /**
  251. * omap_write_buf16 - write buffer to NAND controller
  252. * @mtd: MTD device structure
  253. * @buf: data buffer
  254. * @len: number of bytes to write
  255. */
  256. static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
  257. {
  258. struct omap_nand_info *info = container_of(mtd,
  259. struct omap_nand_info, mtd);
  260. u16 *p = (u16 *) buf;
  261. u32 status = 0;
  262. /* FIXME try bursts of writesw() or DMA ... */
  263. len >>= 1;
  264. while (len--) {
  265. iowrite16(*p++, info->nand.IO_ADDR_W);
  266. /* wait until buffer is available for write */
  267. do {
  268. status = readl(info->reg.gpmc_status) &
  269. GPMC_STATUS_BUFF_EMPTY;
  270. } while (!status);
  271. }
  272. }
/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store date
 * @len: number of bytes to read
 *
 * Uses the GPMC prefetch engine and drains its FIFO in 32-bit units;
 * falls back to plain CPU reads when the engine is already busy.
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads: transfer the unaligned head
	 * (len % 4 bytes) by CPU so the main loop handles whole words */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, (u_char *)p, len);
		else
			omap_read_buf8(mtd, (u_char *)p, len);
	} else {
		do {
			/* FIFO count is in bytes; read it out as words */
			r_count = readl(info->reg.gpmc_prefetch_status);
			r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		omap_prefetch_reset(info->gpmc_cs, info);
	}
}
/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Uses the GPMC prefetch (post-write) engine, feeding the FIFO in
 * 16-bit units; falls back to plain CPU writes when the engine is busy.
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t w_count = 0;
	int i = 0, ret = 0;
	u16 *p = (u16 *)buf;
	unsigned long tim, limit;
	u32 val;

	/* take care of subpage writes: push an odd leading byte so the
	 * main loop only handles 16-bit quantities */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/*  configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, (u_char *)p, len);
		else
			omap_write_buf8(mtd, (u_char *)p, len);
	} else {
		while (len) {
			/* FIFO count = free space in bytes; fill it with
			 * 16-bit words, bounded by the bytes remaining */
			w_count = readl(info->reg.gpmc_prefetch_status);
			w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for data to flushed-out before reset the prefetch;
		 * bounded busy-wait (~OMAP_NAND_TIMEOUT_MS worth of loops) */
		tim = 0;
		limit = (loops_per_jiffy *
					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
		do {
			cpu_relax();
			val = readl(info->reg.gpmc_prefetch_status);
			val = GPMC_PREFETCH_STATUS_COUNT(val);
		} while (val && (tim++ < limit));

		/* disable and stop the PFPW engine */
		omap_prefetch_reset(info->gpmc_cs, info);
	}
}
  369. /*
  370. * omap_nand_dma_callback: callback on the completion of dma transfer
  371. * @data: pointer to completion data structure
  372. */
  373. static void omap_nand_dma_callback(void *data)
  374. {
  375. complete((struct completion *) data);
  376. }
/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 *
 * On any setup failure (unmappable buffer, no DMA descriptor, busy
 * prefetch engine) the transfer falls back to a CPU copy, so the
 * function always completes the I/O and always returns 0.
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	struct dma_async_tx_descriptor *tx;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	struct scatterlist sg;
	unsigned long tim, limit;
	unsigned n;
	int ret;
	u32 val;

	/* vmalloc/high buffers can only be DMA'd when the whole transfer
	 * sits inside a single page; remap it to its directly-mapped
	 * alias, otherwise fall back to CPU copy */
	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	sg_init_one(&sg, addr, len);
	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
	if (n == 0) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto out_copy_unmap;

	tx->callback = omap_nand_dma_callback;
	tx->callback_param = &info->comp;
	dmaengine_submit(tx);

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy_unmap;

	init_completion(&info->comp);
	dma_async_issue_pending(info->dma);

	/* setup and start DMA using dma_addr */
	wait_for_completion(&info->comp);
	/* DMA is done; bounded busy-wait for the prefetch engine to
	 * drain its remaining transfer count */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	do {
		cpu_relax();
		val = readl(info->reg.gpmc_prefetch_status);
		val = GPMC_PREFETCH_STATUS_COUNT(val);
	} while (val && (tim++ < limit));

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);

	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
	return 0;

out_copy_unmap:
	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
  454. /**
  455. * omap_read_buf_dma_pref - read data from NAND controller into buffer
  456. * @mtd: MTD device structure
  457. * @buf: buffer to store date
  458. * @len: number of bytes to read
  459. */
  460. static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
  461. {
  462. if (len <= mtd->oobsize)
  463. omap_read_buf_pref(mtd, buf, len);
  464. else
  465. /* start transfer in DMA mode */
  466. omap_nand_dma_transfer(mtd, buf, len, 0x0);
  467. }
  468. /**
  469. * omap_write_buf_dma_pref - write buffer to NAND controller
  470. * @mtd: MTD device structure
  471. * @buf: data buffer
  472. * @len: number of bytes to write
  473. */
  474. static void omap_write_buf_dma_pref(struct mtd_info *mtd,
  475. const u_char *buf, int len)
  476. {
  477. if (len <= mtd->oobsize)
  478. omap_write_buf_pref(mtd, buf, len);
  479. else
  480. /* start transfer in DMA mode */
  481. omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
  482. }
/*
 * omap_nand_irq - GPMC irq handler
 * @this_irq: gpmc irq number
 * @dev: omap_nand_info structure pointer is passed here
 *
 * Shared by two interrupt sources: the FIFO-threshold irq
 * (gpmc_irq_fifo), which moves data between info->buf and the prefetch
 * FIFO, and the count irq (gpmc_irq_count), which signals the end of
 * the transfer and completes info->comp.
 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;

	bytes = readl(info->reg.gpmc_prefetch_status);
	bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
	bytes = bytes  & 0xFFFC; /* io in multiple of 4 bytes */
	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
		if (this_irq == info->gpmc_irq_count)
			goto done;

		/* clamp to what is actually left in the caller's buffer */
		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		iowrite32_rep(info->nand.IO_ADDR_W,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		ioread32_rep(info->nand.IO_ADDR_R,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;

		if (this_irq == info->gpmc_irq_count)
			goto done;
	}

	return IRQ_HANDLED;

done:
	complete(&info->comp);

	/* transfer finished: mask both irqs until the next I/O enables
	 * them again */
	disable_irq_nosync(info->gpmc_irq_fifo);
	disable_irq_nosync(info->gpmc_irq_count);

	return IRQ_HANDLED;
}
/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store date
 * @len: number of bytes to read
 *
 * Interrupt-driven prefetch read: omap_nand_irq() drains the FIFO into
 * info->buf and completes info->comp when done.  OOB-sized reads and a
 * busy prefetch engine fall back to CPU copies.
 */
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;

	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/*  configure and start prefetch transfer; half-full threshold */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;

	enable_irq(info->gpmc_irq_count);
	enable_irq(info->gpmc_irq_fifo);

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}
/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Interrupt-driven prefetch write: omap_nand_irq() feeds the FIFO from
 * info->buf and completes info->comp when done.  OOB-sized writes and a
 * busy prefetch engine fall back to CPU copies.
 */
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	unsigned long tim, limit;
	u32 val;

	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = omap_prefetch_enable(info->gpmc_cs,
		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;

	enable_irq(info->gpmc_irq_count);
	enable_irq(info->gpmc_irq_fifo);

	/* waiting for write to complete */
	wait_for_completion(&info->comp);

	/* wait for data to flushed-out before reset the prefetch;
	 * bounded busy-wait (~OMAP_NAND_TIMEOUT_MS worth of loops) */
	tim = 0;
	limit = (loops_per_jiffy *  msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	do {
		val = readl(info->reg.gpmc_prefetch_status);
		val = GPMC_PREFETCH_STATUS_COUNT(val);
		cpu_relax();
	} while (val && (tim++ < limit));

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}
/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core
 *
 * Works in place: the three ECC bytes are packed into one 32-bit word,
 * then each output byte is rebuilt (inverted) from the individual
 * parity bits selected and positioned by the P* macros.
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 * If there is no error, %0 is returned. If there is an error but it
 * was corrected, %1 is returned. Otherwise, %-1 is returned.
 *
 * NOTE: both ECC buffers are modified in place (gen_true_ecc +
 * inversion + the bit-unpacking loops below consume them).
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	/* 0xFFFFFF in the spare area means the ECC was never programmed
	 * (erased page); remember this for the default case below */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	/* Unpack each ECC byte into per-bit arrays, LSB first
	 * (the % 2 / divide-by-2 pairs shift bits out one at a time) */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i]     = *ecc_data1 % 2;
		*ecc_data1	= *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i]     = *ecc_data2 % 2;
		*ecc_data2       = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i]     = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i]     = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	/* XOR the two unpacked ECCs into a 24-bit syndrome, reordered so
	 * that odd-indexed bits address the failing bit/byte below */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 *  ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		pr_debug("ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		pr_debug("ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error: odd syndrome bits encode the byte
		 * offset (9 bits) and bit position (3 bits) to flip */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		pr_debug("Correcting single bit ECC error at offset: "
				"%d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 1;
	default:
		/* erased page with a blank (all-zero) hardware ECC is
		 * not an error */
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		pr_debug("UNCORRECTED_ERROR default\n");
		return -1;
	}
}
/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ecc read from nand spare area with ECC registers values
 * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
 * detection and correction. If there are no errors, %0 is returned. If
 * there were errors and all of the errors were corrected, the number of
 * corrected errors is returned. If uncorrectable errors exist, %-1 is
 * returned.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;
	int stat = 0;

	/* Ex NAND_ECC_HW12_2048 : a 2048-byte ECC step is treated as
	 * four independent 512-byte blocks, each with 3 ECC bytes */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size  == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;
			/* keep track of the number of corrected errors */
			stat += ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat      += 512;
	}
	return stat;
}
  771. /**
  772. * omap_calcuate_ecc - Generate non-inverted ECC bytes.
  773. * @mtd: MTD device structure
  774. * @dat: The pointer to data on which ecc is computed
  775. * @ecc_code: The ecc_code buffer
  776. *
  777. * Using noninverted ECC can be considered ugly since writing a blank
  778. * page ie. padding will clear the ECC bytes. This is no problem as long
  779. * nobody is trying to write data on the seemingly unused page. Reading
  780. * an erased page will produce an ECC mismatch between generated and read
  781. * ECC bytes that has to be dealt with separately.
  782. */
  783. static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
  784. u_char *ecc_code)
  785. {
  786. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  787. mtd);
  788. u32 val;
  789. val = readl(info->reg.gpmc_ecc_config);
  790. if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
  791. return -EINVAL;
  792. /* read ecc result */
  793. val = readl(info->reg.gpmc_ecc1_result);
  794. *ecc_code++ = val; /* P128e, ..., P1e */
  795. *ecc_code++ = val >> 16; /* P128o, ..., P1o */
  796. /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
  797. *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
  798. return 0;
  799. }
  800. /**
  801. * omap_enable_hwecc - This function enables the hardware ecc functionality
  802. * @mtd: MTD device structure
  803. * @mode: Read/Write mode
  804. */
  805. static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
  806. {
  807. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  808. mtd);
  809. struct nand_chip *chip = mtd->priv;
  810. unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
  811. u32 val;
  812. /* clear ecc and enable bits */
  813. val = ECCCLEAR | ECC1;
  814. writel(val, info->reg.gpmc_ecc_control);
  815. /* program ecc and result sizes */
  816. val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
  817. ECC1RESULTSIZE);
  818. writel(val, info->reg.gpmc_ecc_size_config);
  819. switch (mode) {
  820. case NAND_ECC_READ:
  821. case NAND_ECC_WRITE:
  822. writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
  823. break;
  824. case NAND_ECC_READSYN:
  825. writel(ECCCLEAR, info->reg.gpmc_ecc_control);
  826. break;
  827. default:
  828. dev_info(&info->pdev->dev,
  829. "error: unrecognized Mode[%d]!\n", mode);
  830. break;
  831. }
  832. /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
  833. val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
  834. writel(val, info->reg.gpmc_ecc_config);
  835. }
  836. /**
  837. * omap_wait - wait until the command is done
  838. * @mtd: MTD device structure
  839. * @chip: NAND Chip structure
  840. *
  841. * Wait function is called during Program and erase operations and
  842. * the way it is called from MTD layer, we should wait till the NAND
  843. * chip is ready after the programming/erase operation has completed.
  844. *
  845. * Erase can take up to 400ms and program up to 20ms according to
  846. * general NAND and SmartMedia specs
  847. */
  848. static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
  849. {
  850. struct nand_chip *this = mtd->priv;
  851. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  852. mtd);
  853. unsigned long timeo = jiffies;
  854. int status, state = this->state;
  855. if (state == FL_ERASING)
  856. timeo += (HZ * 400) / 1000;
  857. else
  858. timeo += (HZ * 20) / 1000;
  859. writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
  860. while (time_before(jiffies, timeo)) {
  861. status = readb(info->reg.gpmc_nand_data);
  862. if (status & NAND_STATUS_READY)
  863. break;
  864. cond_resched();
  865. }
  866. status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
  867. return status;
  868. }
  869. /**
  870. * omap_dev_ready - calls the platform specific dev_ready function
  871. * @mtd: MTD device structure
  872. */
  873. static int omap_dev_ready(struct mtd_info *mtd)
  874. {
  875. unsigned int val = 0;
  876. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  877. mtd);
  878. val = readl(info->reg.gpmc_status);
  879. if ((val & 0x100) == 0x100) {
  880. return 1;
  881. } else {
  882. return 0;
  883. }
  884. }
  885. #ifdef CONFIG_MTD_NAND_OMAP_BCH
  886. /**
  887. * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
  888. * @mtd: MTD device structure
  889. * @mode: Read/Write mode
  890. */
  891. static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
  892. {
  893. int nerrors;
  894. unsigned int dev_width;
  895. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  896. mtd);
  897. struct nand_chip *chip = mtd->priv;
  898. nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
  899. dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
  900. /*
  901. * Program GPMC to perform correction on one 512-byte sector at a time.
  902. * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
  903. * gives a slight (5%) performance gain (but requires additional code).
  904. */
  905. (void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
  906. }
  907. /**
  908. * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
  909. * @mtd: MTD device structure
  910. * @dat: The pointer to data on which ecc is computed
  911. * @ecc_code: The ecc_code buffer
  912. */
  913. static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
  914. u_char *ecc_code)
  915. {
  916. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  917. mtd);
  918. return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
  919. }
  920. /**
  921. * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
  922. * @mtd: MTD device structure
  923. * @dat: The pointer to data on which ecc is computed
  924. * @ecc_code: The ecc_code buffer
  925. */
  926. static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
  927. u_char *ecc_code)
  928. {
  929. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  930. mtd);
  931. return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
  932. }
  933. /**
  934. * omap3_correct_data_bch - Decode received data and correct errors
  935. * @mtd: MTD device structure
  936. * @data: page data
  937. * @read_ecc: ecc read from nand flash
  938. * @calc_ecc: ecc read from HW ECC registers
  939. */
  940. static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
  941. u_char *read_ecc, u_char *calc_ecc)
  942. {
  943. int i, count;
  944. /* cannot correct more than 8 errors */
  945. unsigned int errloc[8];
  946. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  947. mtd);
  948. count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
  949. errloc);
  950. if (count > 0) {
  951. /* correct errors */
  952. for (i = 0; i < count; i++) {
  953. /* correct data only, not ecc bytes */
  954. if (errloc[i] < 8*512)
  955. data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
  956. pr_debug("corrected bitflip %u\n", errloc[i]);
  957. }
  958. } else if (count < 0) {
  959. pr_err("ecc unrecoverable error\n");
  960. }
  961. return count;
  962. }
  963. /**
  964. * omap3_free_bch - Release BCH ecc resources
  965. * @mtd: MTD device structure
  966. */
  967. static void omap3_free_bch(struct mtd_info *mtd)
  968. {
  969. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  970. mtd);
  971. if (info->bch) {
  972. free_bch(info->bch);
  973. info->bch = NULL;
  974. }
  975. }
  976. /**
  977. * omap3_init_bch - Initialize BCH ECC
  978. * @mtd: MTD device structure
  979. * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
  980. */
  981. static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
  982. {
  983. int ret, max_errors;
  984. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  985. mtd);
  986. #ifdef CONFIG_MTD_NAND_OMAP_BCH8
  987. const int hw_errors = 8;
  988. #else
  989. const int hw_errors = 4;
  990. #endif
  991. info->bch = NULL;
  992. max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
  993. if (max_errors != hw_errors) {
  994. pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
  995. max_errors, hw_errors);
  996. goto fail;
  997. }
  998. /* initialize GPMC BCH engine */
  999. ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
  1000. if (ret)
  1001. goto fail;
  1002. /* software bch library is only used to detect and locate errors */
  1003. info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
  1004. if (!info->bch)
  1005. goto fail;
  1006. info->nand.ecc.size = 512;
  1007. info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
  1008. info->nand.ecc.correct = omap3_correct_data_bch;
  1009. info->nand.ecc.mode = NAND_ECC_HW;
  1010. /*
  1011. * The number of corrected errors in an ecc block that will trigger
  1012. * block scrubbing defaults to the ecc strength (4 or 8).
  1013. * Set mtd->bitflip_threshold here to define a custom threshold.
  1014. */
  1015. if (max_errors == 8) {
  1016. info->nand.ecc.strength = 8;
  1017. info->nand.ecc.bytes = 13;
  1018. info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
  1019. } else {
  1020. info->nand.ecc.strength = 4;
  1021. info->nand.ecc.bytes = 7;
  1022. info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
  1023. }
  1024. pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
  1025. return 0;
  1026. fail:
  1027. omap3_free_bch(mtd);
  1028. return -1;
  1029. }
  1030. /**
  1031. * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
  1032. * @mtd: MTD device structure
  1033. */
  1034. static int omap3_init_bch_tail(struct mtd_info *mtd)
  1035. {
  1036. int i, steps;
  1037. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  1038. mtd);
  1039. struct nand_ecclayout *layout = &info->ecclayout;
  1040. /* build oob layout */
  1041. steps = mtd->writesize/info->nand.ecc.size;
  1042. layout->eccbytes = steps*info->nand.ecc.bytes;
  1043. /* do not bother creating special oob layouts for small page devices */
  1044. if (mtd->oobsize < 64) {
  1045. pr_err("BCH ecc is not supported on small page devices\n");
  1046. goto fail;
  1047. }
  1048. /* reserve 2 bytes for bad block marker */
  1049. if (layout->eccbytes+2 > mtd->oobsize) {
  1050. pr_err("no oob layout available for oobsize %d eccbytes %u\n",
  1051. mtd->oobsize, layout->eccbytes);
  1052. goto fail;
  1053. }
  1054. /* put ecc bytes at oob tail */
  1055. for (i = 0; i < layout->eccbytes; i++)
  1056. layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
  1057. layout->oobfree[0].offset = 2;
  1058. layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
  1059. info->nand.ecc.layout = layout;
  1060. if (!(info->nand.options & NAND_BUSWIDTH_16))
  1061. info->nand.badblock_pattern = &bb_descrip_flashbased;
  1062. return 0;
  1063. fail:
  1064. omap3_free_bch(mtd);
  1065. return -1;
  1066. }
  1067. #else
/* Stub when BCH support is compiled out: reject BCH ECC modes at probe. */
static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
{
	pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
	return -1;
}
/* Stub when BCH support is compiled out; never reached because
 * omap3_init_bch() above already fails the probe. */
static int omap3_init_bch_tail(struct mtd_info *mtd)
{
	return -1;
}
/* Stub when BCH support is compiled out: nothing to release. */
static void omap3_free_bch(struct mtd_info *mtd)
{
}
  1080. #endif /* CONFIG_MTD_NAND_OMAP_BCH */
  1081. static int __devinit omap_nand_probe(struct platform_device *pdev)
  1082. {
  1083. struct omap_nand_info *info;
  1084. struct omap_nand_platform_data *pdata;
  1085. int err;
  1086. int i, offset;
  1087. dma_cap_mask_t mask;
  1088. unsigned sig;
  1089. struct resource *res;
  1090. pdata = pdev->dev.platform_data;
  1091. if (pdata == NULL) {
  1092. dev_err(&pdev->dev, "platform data missing\n");
  1093. return -ENODEV;
  1094. }
  1095. info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
  1096. if (!info)
  1097. return -ENOMEM;
  1098. platform_set_drvdata(pdev, info);
  1099. spin_lock_init(&info->controller.lock);
  1100. init_waitqueue_head(&info->controller.wq);
  1101. info->pdev = pdev;
  1102. info->gpmc_cs = pdata->cs;
  1103. info->reg = pdata->reg;
  1104. info->mtd.priv = &info->nand;
  1105. info->mtd.name = dev_name(&pdev->dev);
  1106. info->mtd.owner = THIS_MODULE;
  1107. info->nand.options = pdata->devsize;
  1108. info->nand.options |= NAND_SKIP_BBTSCAN;
  1109. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1110. if (res == NULL) {
  1111. err = -EINVAL;
  1112. dev_err(&pdev->dev, "error getting memory resource\n");
  1113. goto out_free_info;
  1114. }
  1115. info->phys_base = res->start;
  1116. info->mem_size = resource_size(res);
  1117. if (!request_mem_region(info->phys_base, info->mem_size,
  1118. pdev->dev.driver->name)) {
  1119. err = -EBUSY;
  1120. goto out_free_info;
  1121. }
  1122. info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
  1123. if (!info->nand.IO_ADDR_R) {
  1124. err = -ENOMEM;
  1125. goto out_release_mem_region;
  1126. }
  1127. info->nand.controller = &info->controller;
  1128. info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
  1129. info->nand.cmd_ctrl = omap_hwcontrol;
  1130. /*
  1131. * If RDY/BSY line is connected to OMAP then use the omap ready
  1132. * function and the generic nand_wait function which reads the status
  1133. * register after monitoring the RDY/BSY line. Otherwise use a standard
  1134. * chip delay which is slightly more than tR (AC Timing) of the NAND
  1135. * device and read status register until you get a failure or success
  1136. */
  1137. if (pdata->dev_ready) {
  1138. info->nand.dev_ready = omap_dev_ready;
  1139. info->nand.chip_delay = 0;
  1140. } else {
  1141. info->nand.waitfunc = omap_wait;
  1142. info->nand.chip_delay = 50;
  1143. }
  1144. switch (pdata->xfer_type) {
  1145. case NAND_OMAP_PREFETCH_POLLED:
  1146. info->nand.read_buf = omap_read_buf_pref;
  1147. info->nand.write_buf = omap_write_buf_pref;
  1148. break;
  1149. case NAND_OMAP_POLLED:
  1150. if (info->nand.options & NAND_BUSWIDTH_16) {
  1151. info->nand.read_buf = omap_read_buf16;
  1152. info->nand.write_buf = omap_write_buf16;
  1153. } else {
  1154. info->nand.read_buf = omap_read_buf8;
  1155. info->nand.write_buf = omap_write_buf8;
  1156. }
  1157. break;
  1158. case NAND_OMAP_PREFETCH_DMA:
  1159. dma_cap_zero(mask);
  1160. dma_cap_set(DMA_SLAVE, mask);
  1161. sig = OMAP24XX_DMA_GPMC;
  1162. info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
  1163. if (!info->dma) {
  1164. dev_err(&pdev->dev, "DMA engine request failed\n");
  1165. err = -ENXIO;
  1166. goto out_release_mem_region;
  1167. } else {
  1168. struct dma_slave_config cfg;
  1169. memset(&cfg, 0, sizeof(cfg));
  1170. cfg.src_addr = info->phys_base;
  1171. cfg.dst_addr = info->phys_base;
  1172. cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  1173. cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  1174. cfg.src_maxburst = 16;
  1175. cfg.dst_maxburst = 16;
  1176. err = dmaengine_slave_config(info->dma, &cfg);
  1177. if (err) {
  1178. dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
  1179. err);
  1180. goto out_release_mem_region;
  1181. }
  1182. info->nand.read_buf = omap_read_buf_dma_pref;
  1183. info->nand.write_buf = omap_write_buf_dma_pref;
  1184. }
  1185. break;
  1186. case NAND_OMAP_PREFETCH_IRQ:
  1187. info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
  1188. if (info->gpmc_irq_fifo <= 0) {
  1189. dev_err(&pdev->dev, "error getting fifo irq\n");
  1190. err = -ENODEV;
  1191. goto out_release_mem_region;
  1192. }
  1193. err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
  1194. IRQF_SHARED, "gpmc-nand-fifo", info);
  1195. if (err) {
  1196. dev_err(&pdev->dev, "requesting irq(%d) error:%d",
  1197. info->gpmc_irq_fifo, err);
  1198. info->gpmc_irq_fifo = 0;
  1199. goto out_release_mem_region;
  1200. }
  1201. info->gpmc_irq_count = platform_get_irq(pdev, 1);
  1202. if (info->gpmc_irq_count <= 0) {
  1203. dev_err(&pdev->dev, "error getting count irq\n");
  1204. err = -ENODEV;
  1205. goto out_release_mem_region;
  1206. }
  1207. err = request_irq(info->gpmc_irq_count, omap_nand_irq,
  1208. IRQF_SHARED, "gpmc-nand-count", info);
  1209. if (err) {
  1210. dev_err(&pdev->dev, "requesting irq(%d) error:%d",
  1211. info->gpmc_irq_count, err);
  1212. info->gpmc_irq_count = 0;
  1213. goto out_release_mem_region;
  1214. }
  1215. info->nand.read_buf = omap_read_buf_irq_pref;
  1216. info->nand.write_buf = omap_write_buf_irq_pref;
  1217. break;
  1218. default:
  1219. dev_err(&pdev->dev,
  1220. "xfer_type(%d) not supported!\n", pdata->xfer_type);
  1221. err = -EINVAL;
  1222. goto out_release_mem_region;
  1223. }
  1224. /* select the ecc type */
  1225. if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
  1226. info->nand.ecc.mode = NAND_ECC_SOFT;
  1227. else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
  1228. (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
  1229. info->nand.ecc.bytes = 3;
  1230. info->nand.ecc.size = 512;
  1231. info->nand.ecc.strength = 1;
  1232. info->nand.ecc.calculate = omap_calculate_ecc;
  1233. info->nand.ecc.hwctl = omap_enable_hwecc;
  1234. info->nand.ecc.correct = omap_correct_data;
  1235. info->nand.ecc.mode = NAND_ECC_HW;
  1236. } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
  1237. (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
  1238. err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
  1239. if (err) {
  1240. err = -EINVAL;
  1241. goto out_release_mem_region;
  1242. }
  1243. }
  1244. /* DIP switches on some boards change between 8 and 16 bit
  1245. * bus widths for flash. Try the other width if the first try fails.
  1246. */
  1247. if (nand_scan_ident(&info->mtd, 1, NULL)) {
  1248. info->nand.options ^= NAND_BUSWIDTH_16;
  1249. if (nand_scan_ident(&info->mtd, 1, NULL)) {
  1250. err = -ENXIO;
  1251. goto out_release_mem_region;
  1252. }
  1253. }
  1254. /* rom code layout */
  1255. if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
  1256. if (info->nand.options & NAND_BUSWIDTH_16)
  1257. offset = 2;
  1258. else {
  1259. offset = 1;
  1260. info->nand.badblock_pattern = &bb_descrip_flashbased;
  1261. }
  1262. omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
  1263. for (i = 0; i < omap_oobinfo.eccbytes; i++)
  1264. omap_oobinfo.eccpos[i] = i+offset;
  1265. omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
  1266. omap_oobinfo.oobfree->length = info->mtd.oobsize -
  1267. (offset + omap_oobinfo.eccbytes);
  1268. info->nand.ecc.layout = &omap_oobinfo;
  1269. } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
  1270. (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
  1271. /* build OOB layout for BCH ECC correction */
  1272. err = omap3_init_bch_tail(&info->mtd);
  1273. if (err) {
  1274. err = -EINVAL;
  1275. goto out_release_mem_region;
  1276. }
  1277. }
  1278. /* second phase scan */
  1279. if (nand_scan_tail(&info->mtd)) {
  1280. err = -ENXIO;
  1281. goto out_release_mem_region;
  1282. }
  1283. mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
  1284. pdata->nr_parts);
  1285. platform_set_drvdata(pdev, &info->mtd);
  1286. return 0;
  1287. out_release_mem_region:
  1288. if (info->dma)
  1289. dma_release_channel(info->dma);
  1290. if (info->gpmc_irq_count > 0)
  1291. free_irq(info->gpmc_irq_count, info);
  1292. if (info->gpmc_irq_fifo > 0)
  1293. free_irq(info->gpmc_irq_fifo, info);
  1294. release_mem_region(info->phys_base, info->mem_size);
  1295. out_free_info:
  1296. kfree(info);
  1297. return err;
  1298. }
  1299. static int omap_nand_remove(struct platform_device *pdev)
  1300. {
  1301. struct mtd_info *mtd = platform_get_drvdata(pdev);
  1302. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  1303. mtd);
  1304. omap3_free_bch(&info->mtd);
  1305. platform_set_drvdata(pdev, NULL);
  1306. if (info->dma)
  1307. dma_release_channel(info->dma);
  1308. if (info->gpmc_irq_count > 0)
  1309. free_irq(info->gpmc_irq_count, info);
  1310. if (info->gpmc_irq_fifo > 0)
  1311. free_irq(info->gpmc_irq_fifo, info);
  1312. /* Release NAND device, its internal structures and partitions */
  1313. nand_release(&info->mtd);
  1314. iounmap(info->nand.IO_ADDR_R);
  1315. release_mem_region(info->phys_base, NAND_IO_SIZE);
  1316. kfree(info);
  1317. return 0;
  1318. }
/* Platform driver glue for the OMAP2/3 GPMC NAND controller. */
static struct platform_driver omap_nand_driver = {
	.probe = omap_nand_probe,
	.remove = omap_nand_remove,
	.driver = {
		.name = DRIVER_NAME, /* must match the platform device name */
		.owner = THIS_MODULE,
	},
};

/* Registers the driver at module init, unregisters at module exit. */
module_platform_driver(omap_nand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");