/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  10. #include <linux/platform_device.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/delay.h>
  13. #include <linux/jiffies.h>
  14. #include <linux/sched.h>
  15. #include <linux/mtd/mtd.h>
  16. #include <linux/mtd/nand.h>
  17. #include <linux/mtd/partitions.h>
  18. #include <linux/io.h>
  19. #include <linux/slab.h>
  20. #include <plat/dma.h>
  21. #include <plat/gpmc.h>
  22. #include <plat/nand.h>
  23. #define DRIVER_NAME "omap2-nand"
  24. #define NAND_Ecc_P1e (1 << 0)
  25. #define NAND_Ecc_P2e (1 << 1)
  26. #define NAND_Ecc_P4e (1 << 2)
  27. #define NAND_Ecc_P8e (1 << 3)
  28. #define NAND_Ecc_P16e (1 << 4)
  29. #define NAND_Ecc_P32e (1 << 5)
  30. #define NAND_Ecc_P64e (1 << 6)
  31. #define NAND_Ecc_P128e (1 << 7)
  32. #define NAND_Ecc_P256e (1 << 8)
  33. #define NAND_Ecc_P512e (1 << 9)
  34. #define NAND_Ecc_P1024e (1 << 10)
  35. #define NAND_Ecc_P2048e (1 << 11)
  36. #define NAND_Ecc_P1o (1 << 16)
  37. #define NAND_Ecc_P2o (1 << 17)
  38. #define NAND_Ecc_P4o (1 << 18)
  39. #define NAND_Ecc_P8o (1 << 19)
  40. #define NAND_Ecc_P16o (1 << 20)
  41. #define NAND_Ecc_P32o (1 << 21)
  42. #define NAND_Ecc_P64o (1 << 22)
  43. #define NAND_Ecc_P128o (1 << 23)
  44. #define NAND_Ecc_P256o (1 << 24)
  45. #define NAND_Ecc_P512o (1 << 25)
  46. #define NAND_Ecc_P1024o (1 << 26)
  47. #define NAND_Ecc_P2048o (1 << 27)
  48. #define TF(value) (value ? 1 : 0)
  49. #define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
  50. #define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
  51. #define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
  52. #define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
  53. #define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
  54. #define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
  55. #define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
  56. #define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
  57. #define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
  58. #define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
  59. #define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
  60. #define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
  61. #define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
  62. #define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
  63. #define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
  64. #define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
  65. #define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
  66. #define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
  67. #define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
  68. #define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
  69. #define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
  70. #define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
  71. #define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
  72. #define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
  73. #define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
  74. #define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
  75. #define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
  76. #define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
  77. #define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
  78. #define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
  79. #define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
  80. #define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
  81. #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
  82. #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
  83. #ifdef CONFIG_MTD_PARTITIONS
  84. static const char *part_probes[] = { "cmdlinepart", NULL };
  85. #endif
  86. struct omap_nand_info {
  87. struct nand_hw_control controller;
  88. struct omap_nand_platform_data *pdata;
  89. struct mtd_info mtd;
  90. struct mtd_partition *parts;
  91. struct nand_chip nand;
  92. struct platform_device *pdev;
  93. int gpmc_cs;
  94. unsigned long phys_base;
  95. struct completion comp;
  96. int dma_ch;
  97. };
  98. /**
  99. * omap_hwcontrol - hardware specific access to control-lines
  100. * @mtd: MTD device structure
  101. * @cmd: command to device
  102. * @ctrl:
  103. * NAND_NCE: bit 0 -> don't care
  104. * NAND_CLE: bit 1 -> Command Latch
  105. * NAND_ALE: bit 2 -> Address Latch
  106. *
  107. * NOTE: boards may use different bits for these!!
  108. */
  109. static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
  110. {
  111. struct omap_nand_info *info = container_of(mtd,
  112. struct omap_nand_info, mtd);
  113. if (cmd != NAND_CMD_NONE) {
  114. if (ctrl & NAND_CLE)
  115. gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
  116. else if (ctrl & NAND_ALE)
  117. gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
  118. else /* NAND_NCE */
  119. gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
  120. }
  121. }
  122. /**
  123. * omap_read_buf8 - read data from NAND controller into buffer
  124. * @mtd: MTD device structure
  125. * @buf: buffer to store date
  126. * @len: number of bytes to read
  127. */
  128. static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
  129. {
  130. struct nand_chip *nand = mtd->priv;
  131. ioread8_rep(nand->IO_ADDR_R, buf, len);
  132. }
  133. /**
  134. * omap_write_buf8 - write buffer to NAND controller
  135. * @mtd: MTD device structure
  136. * @buf: data buffer
  137. * @len: number of bytes to write
  138. */
  139. static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
  140. {
  141. struct omap_nand_info *info = container_of(mtd,
  142. struct omap_nand_info, mtd);
  143. u_char *p = (u_char *)buf;
  144. u32 status = 0;
  145. while (len--) {
  146. iowrite8(*p++, info->nand.IO_ADDR_W);
  147. /* wait until buffer is available for write */
  148. do {
  149. status = gpmc_read_status(GPMC_STATUS_BUFFER);
  150. } while (!status);
  151. }
  152. }
  153. /**
  154. * omap_read_buf16 - read data from NAND controller into buffer
  155. * @mtd: MTD device structure
  156. * @buf: buffer to store date
  157. * @len: number of bytes to read
  158. */
  159. static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
  160. {
  161. struct nand_chip *nand = mtd->priv;
  162. ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
  163. }
  164. /**
  165. * omap_write_buf16 - write buffer to NAND controller
  166. * @mtd: MTD device structure
  167. * @buf: data buffer
  168. * @len: number of bytes to write
  169. */
  170. static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
  171. {
  172. struct omap_nand_info *info = container_of(mtd,
  173. struct omap_nand_info, mtd);
  174. u16 *p = (u16 *) buf;
  175. u32 status = 0;
  176. /* FIXME try bursts of writesw() or DMA ... */
  177. len >>= 1;
  178. while (len--) {
  179. iowrite16(*p++, info->nand.IO_ADDR_W);
  180. /* wait until buffer is available for write */
  181. do {
  182. status = gpmc_read_status(GPMC_STATUS_BUFFER);
  183. } while (!status);
  184. }
  185. }
  186. /**
  187. * omap_read_buf_pref - read data from NAND controller into buffer
  188. * @mtd: MTD device structure
  189. * @buf: buffer to store date
  190. * @len: number of bytes to read
  191. */
  192. static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
  193. {
  194. struct omap_nand_info *info = container_of(mtd,
  195. struct omap_nand_info, mtd);
  196. uint32_t r_count = 0;
  197. int ret = 0;
  198. u32 *p = (u32 *)buf;
  199. /* take care of subpage reads */
  200. if (len % 4) {
  201. if (info->nand.options & NAND_BUSWIDTH_16)
  202. omap_read_buf16(mtd, buf, len % 4);
  203. else
  204. omap_read_buf8(mtd, buf, len % 4);
  205. p = (u32 *) (buf + len % 4);
  206. len -= len % 4;
  207. }
  208. /* configure and start prefetch transfer */
  209. ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
  210. if (ret) {
  211. /* PFPW engine is busy, use cpu copy method */
  212. if (info->nand.options & NAND_BUSWIDTH_16)
  213. omap_read_buf16(mtd, buf, len);
  214. else
  215. omap_read_buf8(mtd, buf, len);
  216. } else {
  217. p = (u32 *) buf;
  218. do {
  219. r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
  220. r_count = r_count >> 2;
  221. ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
  222. p += r_count;
  223. len -= r_count << 2;
  224. } while (len);
  225. /* disable and stop the PFPW engine */
  226. gpmc_prefetch_reset(info->gpmc_cs);
  227. }
  228. }
  229. /**
  230. * omap_write_buf_pref - write buffer to NAND controller
  231. * @mtd: MTD device structure
  232. * @buf: data buffer
  233. * @len: number of bytes to write
  234. */
  235. static void omap_write_buf_pref(struct mtd_info *mtd,
  236. const u_char *buf, int len)
  237. {
  238. struct omap_nand_info *info = container_of(mtd,
  239. struct omap_nand_info, mtd);
  240. uint32_t pref_count = 0, w_count = 0;
  241. int i = 0, ret = 0;
  242. u16 *p;
  243. /* take care of subpage writes */
  244. if (len % 2 != 0) {
  245. writeb(*buf, info->nand.IO_ADDR_W);
  246. p = (u16 *)(buf + 1);
  247. len--;
  248. }
  249. /* configure and start prefetch transfer */
  250. ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
  251. if (ret) {
  252. /* PFPW engine is busy, use cpu copy method */
  253. if (info->nand.options & NAND_BUSWIDTH_16)
  254. omap_write_buf16(mtd, buf, len);
  255. else
  256. omap_write_buf8(mtd, buf, len);
  257. } else {
  258. p = (u16 *) buf;
  259. while (len) {
  260. w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
  261. w_count = w_count >> 1;
  262. for (i = 0; (i < w_count) && len; i++, len -= 2)
  263. iowrite16(*p++, info->nand.IO_ADDR_W);
  264. }
  265. /* wait for data to flushed-out before reset the prefetch */
  266. do {
  267. pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
  268. } while (pref_count);
  269. /* disable and stop the PFPW engine */
  270. gpmc_prefetch_reset(info->gpmc_cs);
  271. }
  272. }
  273. /*
  274. * omap_nand_dma_cb: callback on the completion of dma transfer
  275. * @lch: logical channel
  276. * @ch_satuts: channel status
  277. * @data: pointer to completion data structure
  278. */
  279. static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
  280. {
  281. complete((struct completion *) data);
  282. }
  283. /*
  284. * omap_nand_dma_transfer: configer and start dma transfer
  285. * @mtd: MTD device structure
  286. * @addr: virtual address in RAM of source/destination
  287. * @len: number of data bytes to be transferred
  288. * @is_write: flag for read/write operation
  289. */
  290. static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
  291. unsigned int len, int is_write)
  292. {
  293. struct omap_nand_info *info = container_of(mtd,
  294. struct omap_nand_info, mtd);
  295. uint32_t prefetch_status = 0;
  296. enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
  297. DMA_FROM_DEVICE;
  298. dma_addr_t dma_addr;
  299. int ret;
  300. /* The fifo depth is 64 bytes. We have a sync at each frame and frame
  301. * length is 64 bytes.
  302. */
  303. int buf_len = len >> 6;
  304. if (addr >= high_memory) {
  305. struct page *p1;
  306. if (((size_t)addr & PAGE_MASK) !=
  307. ((size_t)(addr + len - 1) & PAGE_MASK))
  308. goto out_copy;
  309. p1 = vmalloc_to_page(addr);
  310. if (!p1)
  311. goto out_copy;
  312. addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
  313. }
  314. dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
  315. if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
  316. dev_err(&info->pdev->dev,
  317. "Couldn't DMA map a %d byte buffer\n", len);
  318. goto out_copy;
  319. }
  320. if (is_write) {
  321. omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
  322. info->phys_base, 0, 0);
  323. omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
  324. dma_addr, 0, 0);
  325. omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
  326. 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
  327. OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
  328. } else {
  329. omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
  330. info->phys_base, 0, 0);
  331. omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
  332. dma_addr, 0, 0);
  333. omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
  334. 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
  335. OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
  336. }
  337. /* configure and start prefetch transfer */
  338. ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
  339. if (ret)
  340. /* PFPW engine is busy, use cpu copy methode */
  341. goto out_copy;
  342. init_completion(&info->comp);
  343. omap_start_dma(info->dma_ch);
  344. /* setup and start DMA using dma_addr */
  345. wait_for_completion(&info->comp);
  346. do {
  347. prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
  348. } while (prefetch_status);
  349. /* disable and stop the PFPW engine */
  350. gpmc_prefetch_reset(info->gpmc_cs);
  351. dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
  352. return 0;
  353. out_copy:
  354. if (info->nand.options & NAND_BUSWIDTH_16)
  355. is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
  356. : omap_write_buf16(mtd, (u_char *) addr, len);
  357. else
  358. is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
  359. : omap_write_buf8(mtd, (u_char *) addr, len);
  360. return 0;
  361. }
  362. /**
  363. * omap_read_buf_dma_pref - read data from NAND controller into buffer
  364. * @mtd: MTD device structure
  365. * @buf: buffer to store date
  366. * @len: number of bytes to read
  367. */
  368. static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
  369. {
  370. if (len <= mtd->oobsize)
  371. omap_read_buf_pref(mtd, buf, len);
  372. else
  373. /* start transfer in DMA mode */
  374. omap_nand_dma_transfer(mtd, buf, len, 0x0);
  375. }
  376. /**
  377. * omap_write_buf_dma_pref - write buffer to NAND controller
  378. * @mtd: MTD device structure
  379. * @buf: data buffer
  380. * @len: number of bytes to write
  381. */
  382. static void omap_write_buf_dma_pref(struct mtd_info *mtd,
  383. const u_char *buf, int len)
  384. {
  385. if (len <= mtd->oobsize)
  386. omap_write_buf_pref(mtd, buf, len);
  387. else
  388. /* start transfer in DMA mode */
  389. omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
  390. }
  391. /**
  392. * omap_verify_buf - Verify chip data against buffer
  393. * @mtd: MTD device structure
  394. * @buf: buffer containing the data to compare
  395. * @len: number of bytes to compare
  396. */
  397. static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
  398. {
  399. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  400. mtd);
  401. u16 *p = (u16 *) buf;
  402. len >>= 1;
  403. while (len--) {
  404. if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
  405. return -EFAULT;
  406. }
  407. return 0;
  408. }
  409. #ifdef CONFIG_MTD_NAND_OMAP_HWECC
/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core. The three ECC bytes are
 * rewritten in place.
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	/* Repack the three ECC bytes into the bit positions the parity
	 * helper macros (P*e / P*o) expect, then recombine and invert. */
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area (modified in place)
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 *             (modified in place)
 * @page_data: page data; corrected in place on a single-bit error
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 * Returns 0 on success or a corrected single-bit error, -1 when the
 * error is uncorrectable.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			u8 *ecc_data2,	/* read from register */
			u8 *page_data)
{
	uint i;
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8 ecc_bit[24];
	u8 ecc_sum = 0;
	u8 find_bit = 0;
	uint find_byte = 0;
	int isEccFF;

	/* All-0xFF spare-area ECC usually means an erased page.
	 * NOTE(review): reads 4 bytes through a u32 cast — assumes
	 * ecc_data1 is at least 4-byte accessible; confirm alignment. */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	/* Convert both codes to the "true" ECC bit layout, in place */
	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	/* Un-invert the three bytes produced by gen_true_ecc() */
	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	/* Explode each byte of both codes into per-bit arrays, LSB first
	 * (destroys the byte values as a side effect) */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	/* XOR the two codes bit-by-bit into ecc_bit[]; the ordering below
	 * interleaves byte 2 around bytes 0 and 1 of the codes */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	/* Number of differing parity bits decides the error class */
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 * ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error: the odd parity bits address the
		 * failing byte and bit within the 512-byte block */
		find_byte = (ecc_bit[23] << 8) +
			(ecc_bit[21] << 7) +
			(ecc_bit[19] << 6) +
			(ecc_bit[17] << 5) +
			(ecc_bit[15] << 4) +
			(ecc_bit[13] << 3) +
			(ecc_bit[11] << 2) +
			(ecc_bit[9] << 1) +
			ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 0;
	default:
		/* Treat an erased page (spare ECC all 0xFF) with a clean
		 * hardware ECC as error-free */
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}
  531. /**
  532. * omap_correct_data - Compares the ECC read with HW generated ECC
  533. * @mtd: MTD device structure
  534. * @dat: page data
  535. * @read_ecc: ecc read from nand flash
  536. * @calc_ecc: ecc read from HW ECC registers
  537. *
  538. * Compares the ecc read from nand spare area with ECC registers values
  539. * and if ECC's mismached, it will call 'omap_compare_ecc' for error detection
  540. * and correction.
  541. */
  542. static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
  543. u_char *read_ecc, u_char *calc_ecc)
  544. {
  545. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  546. mtd);
  547. int blockCnt = 0, i = 0, ret = 0;
  548. /* Ex NAND_ECC_HW12_2048 */
  549. if ((info->nand.ecc.mode == NAND_ECC_HW) &&
  550. (info->nand.ecc.size == 2048))
  551. blockCnt = 4;
  552. else
  553. blockCnt = 1;
  554. for (i = 0; i < blockCnt; i++) {
  555. if (memcmp(read_ecc, calc_ecc, 3) != 0) {
  556. ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
  557. if (ret < 0)
  558. return ret;
  559. }
  560. read_ecc += 3;
  561. calc_ecc += 3;
  562. dat += 512;
  563. }
  564. return 0;
  565. }
  566. /**
  567. * omap_calcuate_ecc - Generate non-inverted ECC bytes.
  568. * @mtd: MTD device structure
  569. * @dat: The pointer to data on which ecc is computed
  570. * @ecc_code: The ecc_code buffer
  571. *
  572. * Using noninverted ECC can be considered ugly since writing a blank
  573. * page ie. padding will clear the ECC bytes. This is no problem as long
  574. * nobody is trying to write data on the seemingly unused page. Reading
  575. * an erased page will produce an ECC mismatch between generated and read
  576. * ECC bytes that has to be dealt with separately.
  577. */
  578. static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
  579. u_char *ecc_code)
  580. {
  581. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  582. mtd);
  583. return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
  584. }
  585. /**
  586. * omap_enable_hwecc - This function enables the hardware ecc functionality
  587. * @mtd: MTD device structure
  588. * @mode: Read/Write mode
  589. */
  590. static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
  591. {
  592. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  593. mtd);
  594. struct nand_chip *chip = mtd->priv;
  595. unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
  596. gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
  597. }
  598. #endif
/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * Wait function is called during Program and erase operations and
 * the way it is called from MTD layer, we should wait till the NAND
 * chip is ready after the programming/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs.
 *
 * Returns the last NAND status byte read; on timeout this is whatever
 * the final poll returned (NAND_STATUS_FAIL if no poll ever ran).
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	/* budget per the general NAND spec: 400ms erase, 20ms program */
	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	/* issue the Read Status command once, then poll the data register */
	gpmc_nand_write(info->gpmc_cs,
			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
	while (time_before(jiffies, timeo)) {
		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}
	return status;
}
/**
 * omap_dev_ready - calls the platform specific dev_ready function
 * @mtd: MTD device structure
 *
 * Returns 1 when the device is ready (or the bounded poll below gave
 * up), 0 when the wait-line interrupt fired during the poll.
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	unsigned int val = 0;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	/* NOTE(review): bit 0x100 appears to be the GPMC wait-pin IRQ
	 * status bit — confirm against the GPMC IRQSTATUS register map. */
	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	if ((val & 0x100) == 0x100) {
		/* Clear IRQ Interrupt (write-1-to-clear) */
		val |= 0x100;
		val &= ~(0x0);
		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
	} else {
		/* not ready yet: poll the status a bounded number of times */
		unsigned int cnt = 0;
		while (cnt++ < 0x1FF) {
			if ((val & 0x100) == 0x100)
				return 0;
			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
		}
	}

	return 1;
}
  657. static int __devinit omap_nand_probe(struct platform_device *pdev)
  658. {
  659. struct omap_nand_info *info;
  660. struct omap_nand_platform_data *pdata;
  661. int err;
  662. pdata = pdev->dev.platform_data;
  663. if (pdata == NULL) {
  664. dev_err(&pdev->dev, "platform data missing\n");
  665. return -ENODEV;
  666. }
  667. info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
  668. if (!info)
  669. return -ENOMEM;
  670. platform_set_drvdata(pdev, info);
  671. spin_lock_init(&info->controller.lock);
  672. init_waitqueue_head(&info->controller.wq);
  673. info->pdev = pdev;
  674. info->gpmc_cs = pdata->cs;
  675. info->phys_base = pdata->phys_base;
  676. info->mtd.priv = &info->nand;
  677. info->mtd.name = dev_name(&pdev->dev);
  678. info->mtd.owner = THIS_MODULE;
  679. info->nand.options = pdata->devsize;
  680. info->nand.options |= NAND_SKIP_BBTSCAN;
  681. /* NAND write protect off */
  682. gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
  683. if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
  684. pdev->dev.driver->name)) {
  685. err = -EBUSY;
  686. goto out_free_info;
  687. }
  688. info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
  689. if (!info->nand.IO_ADDR_R) {
  690. err = -ENOMEM;
  691. goto out_release_mem_region;
  692. }
  693. info->nand.controller = &info->controller;
  694. info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
  695. info->nand.cmd_ctrl = omap_hwcontrol;
  696. /*
  697. * If RDY/BSY line is connected to OMAP then use the omap ready
  698. * funcrtion and the generic nand_wait function which reads the status
  699. * register after monitoring the RDY/BSY line.Otherwise use a standard
  700. * chip delay which is slightly more than tR (AC Timing) of the NAND
  701. * device and read status register until you get a failure or success
  702. */
  703. if (pdata->dev_ready) {
  704. info->nand.dev_ready = omap_dev_ready;
  705. info->nand.chip_delay = 0;
  706. } else {
  707. info->nand.waitfunc = omap_wait;
  708. info->nand.chip_delay = 50;
  709. }
  710. switch (pdata->xfer_type) {
  711. case NAND_OMAP_PREFETCH_POLLED:
  712. info->nand.read_buf = omap_read_buf_pref;
  713. info->nand.write_buf = omap_write_buf_pref;
  714. break;
  715. case NAND_OMAP_POLLED:
  716. if (info->nand.options & NAND_BUSWIDTH_16) {
  717. info->nand.read_buf = omap_read_buf16;
  718. info->nand.write_buf = omap_write_buf16;
  719. } else {
  720. info->nand.read_buf = omap_read_buf8;
  721. info->nand.write_buf = omap_write_buf8;
  722. }
  723. break;
  724. case NAND_OMAP_PREFETCH_DMA:
  725. err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
  726. omap_nand_dma_cb, &info->comp, &info->dma_ch);
  727. if (err < 0) {
  728. info->dma_ch = -1;
  729. dev_err(&pdev->dev, "DMA request failed!\n");
  730. goto out_release_mem_region;
  731. } else {
  732. omap_set_dma_dest_burst_mode(info->dma_ch,
  733. OMAP_DMA_DATA_BURST_16);
  734. omap_set_dma_src_burst_mode(info->dma_ch,
  735. OMAP_DMA_DATA_BURST_16);
  736. info->nand.read_buf = omap_read_buf_dma_pref;
  737. info->nand.write_buf = omap_write_buf_dma_pref;
  738. }
  739. break;
  740. default:
  741. dev_err(&pdev->dev,
  742. "xfer_type(%d) not supported!\n", pdata->xfer_type);
  743. err = -EINVAL;
  744. goto out_release_mem_region;
  745. }
  746. info->nand.verify_buf = omap_verify_buf;
  747. #ifdef CONFIG_MTD_NAND_OMAP_HWECC
  748. info->nand.ecc.bytes = 3;
  749. info->nand.ecc.size = 512;
  750. info->nand.ecc.calculate = omap_calculate_ecc;
  751. info->nand.ecc.hwctl = omap_enable_hwecc;
  752. info->nand.ecc.correct = omap_correct_data;
  753. info->nand.ecc.mode = NAND_ECC_HW;
  754. #else
  755. info->nand.ecc.mode = NAND_ECC_SOFT;
  756. #endif
  757. /* DIP switches on some boards change between 8 and 16 bit
  758. * bus widths for flash. Try the other width if the first try fails.
  759. */
  760. if (nand_scan(&info->mtd, 1)) {
  761. info->nand.options ^= NAND_BUSWIDTH_16;
  762. if (nand_scan(&info->mtd, 1)) {
  763. err = -ENXIO;
  764. goto out_release_mem_region;
  765. }
  766. }
  767. #ifdef CONFIG_MTD_PARTITIONS
  768. err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
  769. if (err > 0)
  770. add_mtd_partitions(&info->mtd, info->parts, err);
  771. else if (pdata->parts)
  772. add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
  773. else
  774. #endif
  775. add_mtd_device(&info->mtd);
  776. platform_set_drvdata(pdev, &info->mtd);
  777. return 0;
  778. out_release_mem_region:
  779. release_mem_region(info->phys_base, NAND_IO_SIZE);
  780. out_free_info:
  781. kfree(info);
  782. return err;
  783. }
  784. static int omap_nand_remove(struct platform_device *pdev)
  785. {
  786. struct mtd_info *mtd = platform_get_drvdata(pdev);
  787. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  788. mtd);
  789. platform_set_drvdata(pdev, NULL);
  790. if (info->dma_ch != -1)
  791. omap_free_dma(info->dma_ch);
  792. /* Release NAND device, its internal structures and partitions */
  793. nand_release(&info->mtd);
  794. iounmap(info->nand.IO_ADDR_R);
  795. kfree(&info->mtd);
  796. return 0;
  797. }
/* Platform driver bound by name to the board's "omap2-nand" device. */
static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
  806. static int __init omap_nand_init(void)
  807. {
  808. pr_info("%s driver initializing\n", DRIVER_NAME);
  809. return platform_driver_register(&omap_nand_driver);
  810. }
  811. static void __exit omap_nand_exit(void)
  812. {
  813. platform_driver_unregister(&omap_nand_driver);
  814. }
module_init(omap_nand_init);
module_exit(omap_nand_exit);

/* Allow modprobe/hotplug matching on the platform device name */
MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");