  1. /*
  2. * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
  3. * Copyright © 2004 Micron Technology Inc.
  4. * Copyright © 2004 David Brownell
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #include <linux/platform_device.h>
  11. #include <linux/dmaengine.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/delay.h>
  14. #include <linux/module.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/jiffies.h>
  17. #include <linux/sched.h>
  18. #include <linux/mtd/mtd.h>
  19. #include <linux/mtd/nand.h>
  20. #include <linux/mtd/partitions.h>
  21. #include <linux/omap-dma.h>
  22. #include <linux/io.h>
  23. #include <linux/slab.h>
  24. #include <linux/of.h>
  25. #include <linux/of_device.h>
  26. #include <linux/bch.h>
  27. #include <linux/platform_data/elm.h>
  28. #include <linux/platform_data/mtd-nand-omap2.h>
  29. #define DRIVER_NAME "omap2-nand"
  30. #define OMAP_NAND_TIMEOUT_MS 5000
  31. #define NAND_Ecc_P1e (1 << 0)
  32. #define NAND_Ecc_P2e (1 << 1)
  33. #define NAND_Ecc_P4e (1 << 2)
  34. #define NAND_Ecc_P8e (1 << 3)
  35. #define NAND_Ecc_P16e (1 << 4)
  36. #define NAND_Ecc_P32e (1 << 5)
  37. #define NAND_Ecc_P64e (1 << 6)
  38. #define NAND_Ecc_P128e (1 << 7)
  39. #define NAND_Ecc_P256e (1 << 8)
  40. #define NAND_Ecc_P512e (1 << 9)
  41. #define NAND_Ecc_P1024e (1 << 10)
  42. #define NAND_Ecc_P2048e (1 << 11)
  43. #define NAND_Ecc_P1o (1 << 16)
  44. #define NAND_Ecc_P2o (1 << 17)
  45. #define NAND_Ecc_P4o (1 << 18)
  46. #define NAND_Ecc_P8o (1 << 19)
  47. #define NAND_Ecc_P16o (1 << 20)
  48. #define NAND_Ecc_P32o (1 << 21)
  49. #define NAND_Ecc_P64o (1 << 22)
  50. #define NAND_Ecc_P128o (1 << 23)
  51. #define NAND_Ecc_P256o (1 << 24)
  52. #define NAND_Ecc_P512o (1 << 25)
  53. #define NAND_Ecc_P1024o (1 << 26)
  54. #define NAND_Ecc_P2048o (1 << 27)
  55. #define TF(value) (value ? 1 : 0)
  56. #define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
  57. #define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
  58. #define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
  59. #define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
  60. #define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
  61. #define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
  62. #define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
  63. #define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
  64. #define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
  65. #define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
  66. #define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
  67. #define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
  68. #define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
  69. #define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
  70. #define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
  71. #define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
  72. #define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
  73. #define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
  74. #define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
  75. #define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
  76. #define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
  77. #define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
  78. #define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
  79. #define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
  80. #define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
  81. #define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
  82. #define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
  83. #define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
  84. #define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
  85. #define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
  86. #define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
  87. #define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
  88. #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
  89. #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
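/*
 * Descriptive note (added): the P*() and P*_s() helpers above each test one
 * even (..e) or odd (..o) parity bit of a 32-bit parity word and shift it
 * into the byte position used by gen_true_ecc() and omap_compare_ecc()
 * further down in this file; TF() simply maps a non-zero test to 1.
 */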
  90. #define PREFETCH_CONFIG1_CS_SHIFT 24
  91. #define ECC_CONFIG_CS_SHIFT 1
  92. #define CS_MASK 0x7
  93. #define ENABLE_PREFETCH (0x1 << 7)
  94. #define DMA_MPU_MODE_SHIFT 2
  95. #define ECCSIZE0_SHIFT 12
  96. #define ECCSIZE1_SHIFT 22
  97. #define ECC1RESULTSIZE 0x1
  98. #define ECCCLEAR 0x100
  99. #define ECC1 0x1
  100. #define PREFETCH_FIFOTHRESHOLD_MAX 0x40
  101. #define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
  102. #define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
  103. #define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
  104. #define STATUS_BUFF_EMPTY 0x00000001
  105. #define OMAP24XX_DMA_GPMC 4
  106. #define BCH8_MAX_ERROR 8 /* up to 8 bits correctable */
  107. #define BCH4_MAX_ERROR 4 /* up to 4 bits correctable */
  108. #define SECTOR_BYTES 512
  109. /* 4 bit padding to make byte aligned, 56 = 52 + 4 */
  110. #define BCH4_BIT_PAD 4
  111. #define BCH8_ECC_MAX ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8)
  112. #define BCH4_ECC_MAX ((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8)
  113. /* GPMC ecc engine settings for read */
  114. #define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
  115. #define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
  116. #define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
  117. #define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
  118. #define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */
  119. /* GPMC ecc engine settings for write */
  120. #define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
  121. #define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
  122. #define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
  123. #define OMAP_ECC_BCH8_POLYNOMIAL 0x201b
  124. #ifdef CONFIG_MTD_NAND_OMAP_BCH
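/*
 * Descriptive note (added): these vectors appear to be the expected
 * calculated ECC of a fully erased (all 0xFF) sector for BCH8 and BCH4;
 * omap_elm_correct_data() compares calc_ecc against them to tell erased
 * pages apart from pages with real bit errors.
 */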
  125. static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
  126. 0xac, 0x6b, 0xff, 0x99, 0x7b};
  127. static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
  128. #endif
  129. /* oob info generated at runtime depending on the ecc algorithm and layout selected */
  130. static struct nand_ecclayout omap_oobinfo;
  131. /* Define some generic bad / good block scan patterns which are used
  132. * while scanning a device for factory marked good / bad blocks
  133. */
  134. static uint8_t scan_ff_pattern[] = { 0xff };
  135. static struct nand_bbt_descr bb_descrip_flashbased = {
  136. .options = NAND_BBT_SCANALLPAGES,
  137. .offs = 0,
  138. .len = 1,
  139. .pattern = scan_ff_pattern,
  140. };
  141. struct omap_nand_info {
  142. struct nand_hw_control controller;
  143. struct omap_nand_platform_data *pdata;
  144. struct mtd_info mtd;
  145. struct nand_chip nand;
  146. struct platform_device *pdev;
  147. int gpmc_cs;
  148. unsigned long phys_base;
  149. unsigned long mem_size;
  150. struct completion comp;
  151. struct dma_chan *dma;
  152. int gpmc_irq_fifo;
  153. int gpmc_irq_count;
  154. enum {
  155. OMAP_NAND_IO_READ = 0, /* read */
  156. OMAP_NAND_IO_WRITE, /* write */
  157. } iomode;
  158. u_char *buf;
  159. int buf_len;
  160. struct gpmc_nand_regs reg;
  161. /* fields specific for BCHx_HW ECC scheme */
  162. struct bch_control *bch;
  163. struct nand_ecclayout ecclayout;
  164. bool is_elm_used;
  165. struct device *elm_dev;
  166. struct device_node *of_node;
  167. };
  168. /**
  169. * omap_prefetch_enable - configures and starts prefetch transfer
  170. * @cs: cs (chip select) number
  171. * @fifo_th: fifo threshold to be used for read/ write
  172. * @dma_mode: dma mode enable (1) or disable (0)
  173. * @u32_count: number of bytes to be transferred
  174. * @is_write: prefetch read (0) or post write (1) mode
  175. */
  176. static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
  177. unsigned int u32_count, int is_write, struct omap_nand_info *info)
  178. {
  179. u32 val;
  180. if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
  181. return -1;
  182. if (readl(info->reg.gpmc_prefetch_control))
  183. return -EBUSY;
  184. /* Set the amount of bytes to be prefetched */
  185. writel(u32_count, info->reg.gpmc_prefetch_config2);
  186. /* Set dma/mpu mode, the prefetch read / post write and
  187. * enable the engine. Set which cs has requested it.
  188. */
  189. val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
  190. PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
  191. (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
  192. writel(val, info->reg.gpmc_prefetch_config1);
  193. /* Start the prefetch engine */
  194. writel(0x1, info->reg.gpmc_prefetch_control);
  195. return 0;
  196. }
  197. /**
  198. * omap_prefetch_reset - disables and stops the prefetch engine
  199. */
  200. static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
  201. {
  202. u32 config1;
  203. /* check if the same module/cs is trying to reset */
  204. config1 = readl(info->reg.gpmc_prefetch_config1);
  205. if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
  206. return -EINVAL;
  207. /* Stop the PFPW engine */
  208. writel(0x0, info->reg.gpmc_prefetch_control);
  209. /* Reset/disable the PFPW engine */
  210. writel(0x0, info->reg.gpmc_prefetch_config1);
  211. return 0;
  212. }
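/*
 * Illustrative sketch (added, not part of the driver): the typical
 * CPU-polled read sequence built on the two helpers above, mirroring what
 * omap_read_buf_pref() below does for a word-aligned buffer p of len bytes:
 *
 *	if (!omap_prefetch_enable(info->gpmc_cs, PREFETCH_FIFOTHRESHOLD_MAX,
 *				  0x0, len, 0x0, info)) {
 *		do {
 *			u32 cnt = readl(info->reg.gpmc_prefetch_status);
 *			cnt = PREFETCH_STATUS_FIFO_CNT(cnt) >> 2;
 *			ioread32_rep(info->nand.IO_ADDR_R, p, cnt);
 *			p += cnt;
 *			len -= cnt << 2;
 *		} while (len);
 *		omap_prefetch_reset(info->gpmc_cs, info);
 *	}
 */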
  213. /**
  214. * omap_hwcontrol - hardware specific access to control-lines
  215. * @mtd: MTD device structure
  216. * @cmd: command to device
  217. * @ctrl:
  218. * NAND_NCE: bit 0 -> don't care
  219. * NAND_CLE: bit 1 -> Command Latch
  220. * NAND_ALE: bit 2 -> Address Latch
  221. *
  222. * NOTE: boards may use different bits for these!!
  223. */
  224. static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
  225. {
  226. struct omap_nand_info *info = container_of(mtd,
  227. struct omap_nand_info, mtd);
  228. if (cmd != NAND_CMD_NONE) {
  229. if (ctrl & NAND_CLE)
  230. writeb(cmd, info->reg.gpmc_nand_command);
  231. else if (ctrl & NAND_ALE)
  232. writeb(cmd, info->reg.gpmc_nand_address);
  233. else /* NAND_NCE */
  234. writeb(cmd, info->reg.gpmc_nand_data);
  235. }
  236. }
  237. /**
  238. * omap_read_buf8 - read data from NAND controller into buffer
  239. * @mtd: MTD device structure
  240. * @buf: buffer to store data
  241. * @len: number of bytes to read
  242. */
  243. static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
  244. {
  245. struct nand_chip *nand = mtd->priv;
  246. ioread8_rep(nand->IO_ADDR_R, buf, len);
  247. }
  248. /**
  249. * omap_write_buf8 - write buffer to NAND controller
  250. * @mtd: MTD device structure
  251. * @buf: data buffer
  252. * @len: number of bytes to write
  253. */
  254. static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
  255. {
  256. struct omap_nand_info *info = container_of(mtd,
  257. struct omap_nand_info, mtd);
  258. u_char *p = (u_char *)buf;
  259. u32 status = 0;
  260. while (len--) {
  261. iowrite8(*p++, info->nand.IO_ADDR_W);
  262. /* wait until buffer is available for write */
  263. do {
  264. status = readl(info->reg.gpmc_status) &
  265. STATUS_BUFF_EMPTY;
  266. } while (!status);
  267. }
  268. }
  269. /**
  270. * omap_read_buf16 - read data from NAND controller into buffer
  271. * @mtd: MTD device structure
  272. * @buf: buffer to store data
  273. * @len: number of bytes to read
  274. */
  275. static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
  276. {
  277. struct nand_chip *nand = mtd->priv;
  278. ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
  279. }
  280. /**
  281. * omap_write_buf16 - write buffer to NAND controller
  282. * @mtd: MTD device structure
  283. * @buf: data buffer
  284. * @len: number of bytes to write
  285. */
  286. static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
  287. {
  288. struct omap_nand_info *info = container_of(mtd,
  289. struct omap_nand_info, mtd);
  290. u16 *p = (u16 *) buf;
  291. u32 status = 0;
  292. /* FIXME try bursts of writesw() or DMA ... */
  293. len >>= 1;
  294. while (len--) {
  295. iowrite16(*p++, info->nand.IO_ADDR_W);
  296. /* wait until buffer is available for write */
  297. do {
  298. status = readl(info->reg.gpmc_status) &
  299. STATUS_BUFF_EMPTY;
  300. } while (!status);
  301. }
  302. }
  303. /**
  304. * omap_read_buf_pref - read data from NAND controller into buffer
  305. * @mtd: MTD device structure
  306. * @buf: buffer to store data
  307. * @len: number of bytes to read
  308. */
  309. static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
  310. {
  311. struct omap_nand_info *info = container_of(mtd,
  312. struct omap_nand_info, mtd);
  313. uint32_t r_count = 0;
  314. int ret = 0;
  315. u32 *p = (u32 *)buf;
  316. /* take care of subpage reads */
  317. if (len % 4) {
  318. if (info->nand.options & NAND_BUSWIDTH_16)
  319. omap_read_buf16(mtd, buf, len % 4);
  320. else
  321. omap_read_buf8(mtd, buf, len % 4);
  322. p = (u32 *) (buf + len % 4);
  323. len -= len % 4;
  324. }
  325. /* configure and start prefetch transfer */
  326. ret = omap_prefetch_enable(info->gpmc_cs,
  327. PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
  328. if (ret) {
  329. /* PFPW engine is busy, use cpu copy method */
  330. if (info->nand.options & NAND_BUSWIDTH_16)
  331. omap_read_buf16(mtd, (u_char *)p, len);
  332. else
  333. omap_read_buf8(mtd, (u_char *)p, len);
  334. } else {
  335. do {
  336. r_count = readl(info->reg.gpmc_prefetch_status);
  337. r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
  338. r_count = r_count >> 2;
  339. ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
  340. p += r_count;
  341. len -= r_count << 2;
  342. } while (len);
  343. /* disable and stop the PFPW engine */
  344. omap_prefetch_reset(info->gpmc_cs, info);
  345. }
  346. }
  347. /**
  348. * omap_write_buf_pref - write buffer to NAND controller
  349. * @mtd: MTD device structure
  350. * @buf: data buffer
  351. * @len: number of bytes to write
  352. */
  353. static void omap_write_buf_pref(struct mtd_info *mtd,
  354. const u_char *buf, int len)
  355. {
  356. struct omap_nand_info *info = container_of(mtd,
  357. struct omap_nand_info, mtd);
  358. uint32_t w_count = 0;
  359. int i = 0, ret = 0;
  360. u16 *p = (u16 *)buf;
  361. unsigned long tim, limit;
  362. u32 val;
  363. /* take care of subpage writes */
  364. if (len % 2 != 0) {
  365. writeb(*buf, info->nand.IO_ADDR_W);
  366. p = (u16 *)(buf + 1);
  367. len--;
  368. }
  369. /* configure and start prefetch transfer */
  370. ret = omap_prefetch_enable(info->gpmc_cs,
  371. PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
  372. if (ret) {
  373. /* PFPW engine is busy, use cpu copy method */
  374. if (info->nand.options & NAND_BUSWIDTH_16)
  375. omap_write_buf16(mtd, (u_char *)p, len);
  376. else
  377. omap_write_buf8(mtd, (u_char *)p, len);
  378. } else {
  379. while (len) {
  380. w_count = readl(info->reg.gpmc_prefetch_status);
  381. w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
  382. w_count = w_count >> 1;
  383. for (i = 0; (i < w_count) && len; i++, len -= 2)
  384. iowrite16(*p++, info->nand.IO_ADDR_W);
  385. }
  386. /* wait for data to be flushed out before resetting the prefetch */
  387. tim = 0;
  388. limit = (loops_per_jiffy *
  389. msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
  390. do {
  391. cpu_relax();
  392. val = readl(info->reg.gpmc_prefetch_status);
  393. val = PREFETCH_STATUS_COUNT(val);
  394. } while (val && (tim++ < limit));
  395. /* disable and stop the PFPW engine */
  396. omap_prefetch_reset(info->gpmc_cs, info);
  397. }
  398. }
  399. /*
  400. * omap_nand_dma_callback: callback on the completion of dma transfer
  401. * @data: pointer to completion data structure
  402. */
  403. static void omap_nand_dma_callback(void *data)
  404. {
  405. complete((struct completion *) data);
  406. }
  407. /*
  408. * omap_nand_dma_transfer: configure and start dma transfer
  409. * @mtd: MTD device structure
  410. * @addr: virtual address in RAM of source/destination
  411. * @len: number of data bytes to be transferred
  412. * @is_write: flag for read/write operation
  413. */
  414. static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
  415. unsigned int len, int is_write)
  416. {
  417. struct omap_nand_info *info = container_of(mtd,
  418. struct omap_nand_info, mtd);
  419. struct dma_async_tx_descriptor *tx;
  420. enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
  421. DMA_FROM_DEVICE;
  422. struct scatterlist sg;
  423. unsigned long tim, limit;
  424. unsigned n;
  425. int ret;
  426. u32 val;
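  /*
   * Descriptive note (added): buffers above high_memory come from vmalloc;
   * they are only handled here if they do not cross a page boundary, and
   * are translated to their kernel lowmem alias before DMA mapping.
   * Otherwise the code falls back to the PIO copy path at out_copy.
   */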
  427. if (addr >= high_memory) {
  428. struct page *p1;
  429. if (((size_t)addr & PAGE_MASK) !=
  430. ((size_t)(addr + len - 1) & PAGE_MASK))
  431. goto out_copy;
  432. p1 = vmalloc_to_page(addr);
  433. if (!p1)
  434. goto out_copy;
  435. addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
  436. }
  437. sg_init_one(&sg, addr, len);
  438. n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
  439. if (n == 0) {
  440. dev_err(&info->pdev->dev,
  441. "Couldn't DMA map a %d byte buffer\n", len);
  442. goto out_copy;
  443. }
  444. tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
  445. is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
  446. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  447. if (!tx)
  448. goto out_copy_unmap;
  449. tx->callback = omap_nand_dma_callback;
  450. tx->callback_param = &info->comp;
  451. dmaengine_submit(tx);
  452. /* configure and start prefetch transfer */
  453. ret = omap_prefetch_enable(info->gpmc_cs,
  454. PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
  455. if (ret)
  456. /* PFPW engine is busy, use cpu copy method */
  457. goto out_copy_unmap;
  458. init_completion(&info->comp);
  459. dma_async_issue_pending(info->dma);
  460. /* setup and start DMA using dma_addr */
  461. wait_for_completion(&info->comp);
  462. tim = 0;
  463. limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
  464. do {
  465. cpu_relax();
  466. val = readl(info->reg.gpmc_prefetch_status);
  467. val = PREFETCH_STATUS_COUNT(val);
  468. } while (val && (tim++ < limit));
  469. /* disable and stop the PFPW engine */
  470. omap_prefetch_reset(info->gpmc_cs, info);
  471. dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
  472. return 0;
  473. out_copy_unmap:
  474. dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
  475. out_copy:
  476. if (info->nand.options & NAND_BUSWIDTH_16)
  477. is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
  478. : omap_write_buf16(mtd, (u_char *) addr, len);
  479. else
  480. is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
  481. : omap_write_buf8(mtd, (u_char *) addr, len);
  482. return 0;
  483. }
  484. /**
  485. * omap_read_buf_dma_pref - read data from NAND controller into buffer
  486. * @mtd: MTD device structure
  487. * @buf: buffer to store data
  488. * @len: number of bytes to read
  489. */
  490. static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
  491. {
  492. if (len <= mtd->oobsize)
  493. omap_read_buf_pref(mtd, buf, len);
  494. else
  495. /* start transfer in DMA mode */
  496. omap_nand_dma_transfer(mtd, buf, len, 0x0);
  497. }
  498. /**
  499. * omap_write_buf_dma_pref - write buffer to NAND controller
  500. * @mtd: MTD device structure
  501. * @buf: data buffer
  502. * @len: number of bytes to write
  503. */
  504. static void omap_write_buf_dma_pref(struct mtd_info *mtd,
  505. const u_char *buf, int len)
  506. {
  507. if (len <= mtd->oobsize)
  508. omap_write_buf_pref(mtd, buf, len);
  509. else
  510. /* start transfer in DMA mode */
  511. omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
  512. }
  513. /*
  514. * omap_nand_irq - GPMC irq handler
  515. * @this_irq: gpmc irq number
  516. * @dev: omap_nand_info structure pointer is passed here
  517. */
  518. static irqreturn_t omap_nand_irq(int this_irq, void *dev)
  519. {
  520. struct omap_nand_info *info = (struct omap_nand_info *) dev;
  521. u32 bytes;
  522. bytes = readl(info->reg.gpmc_prefetch_status);
  523. bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
  524. bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
  525. if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
  526. if (this_irq == info->gpmc_irq_count)
  527. goto done;
  528. if (info->buf_len && (info->buf_len < bytes))
  529. bytes = info->buf_len;
  530. else if (!info->buf_len)
  531. bytes = 0;
  532. iowrite32_rep(info->nand.IO_ADDR_W,
  533. (u32 *)info->buf, bytes >> 2);
  534. info->buf = info->buf + bytes;
  535. info->buf_len -= bytes;
  536. } else {
  537. ioread32_rep(info->nand.IO_ADDR_R,
  538. (u32 *)info->buf, bytes >> 2);
  539. info->buf = info->buf + bytes;
  540. if (this_irq == info->gpmc_irq_count)
  541. goto done;
  542. }
  543. return IRQ_HANDLED;
  544. done:
  545. complete(&info->comp);
  546. disable_irq_nosync(info->gpmc_irq_fifo);
  547. disable_irq_nosync(info->gpmc_irq_count);
  548. return IRQ_HANDLED;
  549. }
  550. /*
  551. * omap_read_buf_irq_pref - read data from NAND controller into buffer
  552. * @mtd: MTD device structure
  553. * @buf: buffer to store data
  554. * @len: number of bytes to read
  555. */
  556. static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
  557. {
  558. struct omap_nand_info *info = container_of(mtd,
  559. struct omap_nand_info, mtd);
  560. int ret = 0;
  561. if (len <= mtd->oobsize) {
  562. omap_read_buf_pref(mtd, buf, len);
  563. return;
  564. }
  565. info->iomode = OMAP_NAND_IO_READ;
  566. info->buf = buf;
  567. init_completion(&info->comp);
  568. /* configure and start prefetch transfer */
  569. ret = omap_prefetch_enable(info->gpmc_cs,
  570. PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
  571. if (ret)
  572. /* PFPW engine is busy, use cpu copy method */
  573. goto out_copy;
  574. info->buf_len = len;
  575. enable_irq(info->gpmc_irq_count);
  576. enable_irq(info->gpmc_irq_fifo);
  577. /* waiting for read to complete */
  578. wait_for_completion(&info->comp);
  579. /* disable and stop the PFPW engine */
  580. omap_prefetch_reset(info->gpmc_cs, info);
  581. return;
  582. out_copy:
  583. if (info->nand.options & NAND_BUSWIDTH_16)
  584. omap_read_buf16(mtd, buf, len);
  585. else
  586. omap_read_buf8(mtd, buf, len);
  587. }
  588. /*
  589. * omap_write_buf_irq_pref - write buffer to NAND controller
  590. * @mtd: MTD device structure
  591. * @buf: data buffer
  592. * @len: number of bytes to write
  593. */
  594. static void omap_write_buf_irq_pref(struct mtd_info *mtd,
  595. const u_char *buf, int len)
  596. {
  597. struct omap_nand_info *info = container_of(mtd,
  598. struct omap_nand_info, mtd);
  599. int ret = 0;
  600. unsigned long tim, limit;
  601. u32 val;
  602. if (len <= mtd->oobsize) {
  603. omap_write_buf_pref(mtd, buf, len);
  604. return;
  605. }
  606. info->iomode = OMAP_NAND_IO_WRITE;
  607. info->buf = (u_char *) buf;
  608. init_completion(&info->comp);
  609. /* configure and start prefetch transfer : size=24 */
  610. ret = omap_prefetch_enable(info->gpmc_cs,
  611. (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
  612. if (ret)
  613. /* PFPW engine is busy, use cpu copy method */
  614. goto out_copy;
  615. info->buf_len = len;
  616. enable_irq(info->gpmc_irq_count);
  617. enable_irq(info->gpmc_irq_fifo);
  618. /* waiting for write to complete */
  619. wait_for_completion(&info->comp);
  620. /* wait for data to be flushed out before resetting the prefetch */
  621. tim = 0;
  622. limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
  623. do {
  624. val = readl(info->reg.gpmc_prefetch_status);
  625. val = PREFETCH_STATUS_COUNT(val);
  626. cpu_relax();
  627. } while (val && (tim++ < limit));
  628. /* disable and stop the PFPW engine */
  629. omap_prefetch_reset(info->gpmc_cs, info);
  630. return;
  631. out_copy:
  632. if (info->nand.options & NAND_BUSWIDTH_16)
  633. omap_write_buf16(mtd, buf, len);
  634. else
  635. omap_write_buf8(mtd, buf, len);
  636. }
  637. /**
  638. * gen_true_ecc - This function will generate true ECC value
  639. * @ecc_buf: buffer to store ecc code
  640. *
  641. * This generated true ECC value can be used when correcting
  642. * data read from NAND flash memory core
  643. */
  644. static void gen_true_ecc(u8 *ecc_buf)
  645. {
  646. u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
  647. ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
  648. ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
  649. P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
  650. ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
  651. P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
  652. ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
  653. P1e(tmp) | P2048o(tmp) | P2048e(tmp));
  654. }
  655. /**
  656. * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
  657. * @ecc_data1: ecc code from nand spare area
  658. * @ecc_data2: ecc code from hardware register obtained from hardware ecc
  659. * @page_data: page data
  660. *
  661. * This function compares two ECC's and indicates if there is an error.
  662. * If the error can be corrected it will be corrected to the buffer.
  663. * If there is no error, %0 is returned. If there is an error but it
  664. * was corrected, %1 is returned. Otherwise, %-1 is returned.
  665. */
  666. static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
  667. u8 *ecc_data2, /* read from register */
  668. u8 *page_data)
  669. {
  670. uint i;
  671. u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
  672. u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
  673. u8 ecc_bit[24];
  674. u8 ecc_sum = 0;
  675. u8 find_bit = 0;
  676. uint find_byte = 0;
  677. int isEccFF;
  678. isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
  679. gen_true_ecc(ecc_data1);
  680. gen_true_ecc(ecc_data2);
  681. for (i = 0; i <= 2; i++) {
  682. *(ecc_data1 + i) = ~(*(ecc_data1 + i));
  683. *(ecc_data2 + i) = ~(*(ecc_data2 + i));
  684. }
  685. for (i = 0; i < 8; i++) {
  686. tmp0_bit[i] = *ecc_data1 % 2;
  687. *ecc_data1 = *ecc_data1 / 2;
  688. }
  689. for (i = 0; i < 8; i++) {
  690. tmp1_bit[i] = *(ecc_data1 + 1) % 2;
  691. *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
  692. }
  693. for (i = 0; i < 8; i++) {
  694. tmp2_bit[i] = *(ecc_data1 + 2) % 2;
  695. *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
  696. }
  697. for (i = 0; i < 8; i++) {
  698. comp0_bit[i] = *ecc_data2 % 2;
  699. *ecc_data2 = *ecc_data2 / 2;
  700. }
  701. for (i = 0; i < 8; i++) {
  702. comp1_bit[i] = *(ecc_data2 + 1) % 2;
  703. *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
  704. }
  705. for (i = 0; i < 8; i++) {
  706. comp2_bit[i] = *(ecc_data2 + 2) % 2;
  707. *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
  708. }
  709. for (i = 0; i < 6; i++)
  710. ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
  711. for (i = 0; i < 8; i++)
  712. ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
  713. for (i = 0; i < 8; i++)
  714. ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
  715. ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
  716. ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
  717. for (i = 0; i < 24; i++)
  718. ecc_sum += ecc_bit[i];
  719. switch (ecc_sum) {
  720. case 0:
  721. /* Not reached because this function is not called if
  722. * ECC values are equal
  723. */
  724. return 0;
  725. case 1:
  726. /* Uncorrectable error */
  727. pr_debug("ECC UNCORRECTED_ERROR 1\n");
  728. return -1;
  729. case 11:
  730. /* Uncorrectable error */
  731. pr_debug("ECC UNCORRECTED_ERROR B\n");
  732. return -1;
  733. case 12:
  734. /* Correctable error */
  735. find_byte = (ecc_bit[23] << 8) +
  736. (ecc_bit[21] << 7) +
  737. (ecc_bit[19] << 6) +
  738. (ecc_bit[17] << 5) +
  739. (ecc_bit[15] << 4) +
  740. (ecc_bit[13] << 3) +
  741. (ecc_bit[11] << 2) +
  742. (ecc_bit[9] << 1) +
  743. ecc_bit[7];
  744. find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
  745. pr_debug("Correcting single bit ECC error at offset: "
  746. "%d, bit: %d\n", find_byte, find_bit);
  747. page_data[find_byte] ^= (1 << find_bit);
  748. return 1;
  749. default:
  750. if (isEccFF) {
  751. if (ecc_data2[0] == 0 &&
  752. ecc_data2[1] == 0 &&
  753. ecc_data2[2] == 0)
  754. return 0;
  755. }
  756. pr_debug("UNCORRECTED_ERROR default\n");
  757. return -1;
  758. }
  759. }
  760. /**
  761. * omap_correct_data - Compares the ECC read with HW generated ECC
  762. * @mtd: MTD device structure
  763. * @dat: page data
  764. * @read_ecc: ecc read from nand flash
  765. * @calc_ecc: ecc read from HW ECC registers
  766. *
  767. * Compares the ecc read from nand spare area with ECC registers values
  768. * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
  769. * detection and correction. If there are no errors, %0 is returned. If
  770. * there were errors and all of the errors were corrected, the number of
  771. * corrected errors is returned. If uncorrectable errors exist, %-1 is
  772. * returned.
  773. */
  774. static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
  775. u_char *read_ecc, u_char *calc_ecc)
  776. {
  777. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  778. mtd);
  779. int blockCnt = 0, i = 0, ret = 0;
  780. int stat = 0;
  781. /* Ex NAND_ECC_HW12_2048 */
  782. if ((info->nand.ecc.mode == NAND_ECC_HW) &&
  783. (info->nand.ecc.size == 2048))
  784. blockCnt = 4;
  785. else
  786. blockCnt = 1;
  787. for (i = 0; i < blockCnt; i++) {
  788. if (memcmp(read_ecc, calc_ecc, 3) != 0) {
  789. ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
  790. if (ret < 0)
  791. return ret;
  792. /* keep track of the number of corrected errors */
  793. stat += ret;
  794. }
  795. read_ecc += 3;
  796. calc_ecc += 3;
  797. dat += 512;
  798. }
  799. return stat;
  800. }
  801. /**
  802. * omap_calculate_ecc - Generate non-inverted ECC bytes.
  803. * @mtd: MTD device structure
  804. * @dat: The pointer to data on which ecc is computed
  805. * @ecc_code: The ecc_code buffer
  806. *
  807. * Using noninverted ECC can be considered ugly since writing a blank
  808. * page, i.e. padding, will clear the ECC bytes. This is no problem as long
  809. * as nobody is trying to write data on the seemingly unused page. Reading
  810. * an erased page will produce an ECC mismatch between generated and read
  811. * ECC bytes that has to be dealt with separately.
  812. */
  813. static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
  814. u_char *ecc_code)
  815. {
  816. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  817. mtd);
  818. u32 val;
  819. val = readl(info->reg.gpmc_ecc_config);
  820. if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
  821. return -EINVAL;
  822. /* read ecc result */
  823. val = readl(info->reg.gpmc_ecc1_result);
  824. *ecc_code++ = val; /* P128e, ..., P1e */
  825. *ecc_code++ = val >> 16; /* P128o, ..., P1o */
  826. /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
  827. *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
  828. return 0;
  829. }
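/*
 * Worked example (added, illustrative): for a hypothetical gpmc_ecc1_result
 * value of 0x0A0B0C0D the code above yields
 *	ecc_code[0] = 0x0D   (bits  7..0,  P128e..P1e)
 *	ecc_code[1] = 0x0B   (bits 23..16, P128o..P1o)
 *	ecc_code[2] = 0xAC   (bits 27..24 in the high nibble, bits 11..8 in the low nibble)
 */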
  830. /**
  831. * omap_enable_hwecc - This function enables the hardware ecc functionality
  832. * @mtd: MTD device structure
  833. * @mode: Read/Write mode
  834. */
  835. static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
  836. {
  837. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  838. mtd);
  839. struct nand_chip *chip = mtd->priv;
  840. unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
  841. u32 val;
  842. /* clear ecc and enable bits */
  843. val = ECCCLEAR | ECC1;
  844. writel(val, info->reg.gpmc_ecc_control);
  845. /* program ecc and result sizes */
  846. val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
  847. ECC1RESULTSIZE);
  848. writel(val, info->reg.gpmc_ecc_size_config);
  849. switch (mode) {
  850. case NAND_ECC_READ:
  851. case NAND_ECC_WRITE:
  852. writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
  853. break;
  854. case NAND_ECC_READSYN:
  855. writel(ECCCLEAR, info->reg.gpmc_ecc_control);
  856. break;
  857. default:
  858. dev_info(&info->pdev->dev,
  859. "error: unrecognized Mode[%d]!\n", mode);
  860. break;
  861. }
  862. /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
  863. val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
  864. writel(val, info->reg.gpmc_ecc_config);
  865. }
  866. /**
  867. * omap_wait - wait until the command is done
  868. * @mtd: MTD device structure
  869. * @chip: NAND Chip structure
  870. *
  871. * Wait function is called during Program and erase operations and
  872. * the way it is called from MTD layer, we should wait till the NAND
  873. * chip is ready after the programming/erase operation has completed.
  874. *
  875. * Erase can take up to 400ms and program up to 20ms according to
  876. * general NAND and SmartMedia specs
  877. */
  878. static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
  879. {
  880. struct nand_chip *this = mtd->priv;
  881. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  882. mtd);
  883. unsigned long timeo = jiffies;
  884. int status, state = this->state;
  885. if (state == FL_ERASING)
  886. timeo += msecs_to_jiffies(400);
  887. else
  888. timeo += msecs_to_jiffies(20);
  889. writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
  890. while (time_before(jiffies, timeo)) {
  891. status = readb(info->reg.gpmc_nand_data);
  892. if (status & NAND_STATUS_READY)
  893. break;
  894. cond_resched();
  895. }
  896. status = readb(info->reg.gpmc_nand_data);
  897. return status;
  898. }
  899. /**
  900. * omap_dev_ready - checks device ready/busy status from the GPMC status register
  901. * @mtd: MTD device structure
  902. */
  903. static int omap_dev_ready(struct mtd_info *mtd)
  904. {
  905. unsigned int val = 0;
  906. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  907. mtd);
  908. val = readl(info->reg.gpmc_status);
  909. if ((val & 0x100) == 0x100) {
  910. return 1;
  911. } else {
  912. return 0;
  913. }
  914. }
  915. #if defined(CONFIG_MTD_NAND_ECC_BCH) || defined(CONFIG_MTD_NAND_OMAP_BCH)
  916. /**
  917. * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
  918. * @mtd: MTD device structure
  919. * @mode: Read/Write mode
  920. *
  921. * When using BCH, sector size is hardcoded to 512 bytes.
  922. * Wrapping mode 6 is used for both reading and writing if the ELM module
  923. * is not used for error correction.
  924. * On writing,
  925. * eccsize0 = 0 (no additional protected byte in spare area)
  926. * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
  927. */
  928. static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
  929. {
  930. int nerrors;
  931. unsigned int dev_width, nsectors;
  932. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  933. mtd);
  934. struct nand_chip *chip = mtd->priv;
  935. u32 val, wr_mode;
  936. unsigned int ecc_size1, ecc_size0;
  937. /* Using wrapping mode 6 for writing */
  938. wr_mode = BCH_WRAPMODE_6;
  939. /*
  940. * ECC engine enabled for valid ecc_size0 nibbles
  941. * and disabled for ecc_size1 nibbles.
  942. */
  943. ecc_size0 = BCH_ECC_SIZE0;
  944. ecc_size1 = BCH_ECC_SIZE1;
  945. /* Perform ecc calculation on 512-byte sector */
  946. nsectors = 1;
  947. /* Update the number of correctable errors */
  948. nerrors = info->nand.ecc.strength;
  949. /* Multi sector reading/writing for NAND flash with page size < 4096 */
  950. if (info->is_elm_used && (mtd->writesize <= 4096)) {
  951. if (mode == NAND_ECC_READ) {
  952. /* Using wrapping mode 1 for reading */
  953. wr_mode = BCH_WRAPMODE_1;
  954. /*
  955. * ECC engine enabled for ecc_size0 nibbles
  956. * and disabled for ecc_size1 nibbles.
  957. */
  958. ecc_size0 = (nerrors == 8) ?
  959. BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0;
  960. ecc_size1 = (nerrors == 8) ?
  961. BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1;
  962. }
  963. /* Perform ecc calculation for one page (< 4096) */
  964. nsectors = info->nand.ecc.steps;
  965. }
  966. writel(ECC1, info->reg.gpmc_ecc_control);
  967. /* Configure ecc size for BCH */
  968. val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
  969. writel(val, info->reg.gpmc_ecc_size_config);
  970. dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
  971. /* BCH configuration */
  972. val = ((1 << 16) | /* enable BCH */
  973. (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
  974. (wr_mode << 8) | /* wrap mode */
  975. (dev_width << 7) | /* bus width */
  976. (((nsectors-1) & 0x7) << 4) | /* number of sectors */
  977. (info->gpmc_cs << 1) | /* ECC CS */
  978. (0x1)); /* enable ECC */
  979. writel(val, info->reg.gpmc_ecc_config);
  980. /* Clear ecc and enable bits */
  981. writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
  982. }
  983. #endif
  984. #ifdef CONFIG_MTD_NAND_ECC_BCH
  985. /**
  986. * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
  987. * @mtd: MTD device structure
  988. * @dat: The pointer to data on which ecc is computed
  989. * @ecc_code: The ecc_code buffer
  990. */
  991. static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
  992. u_char *ecc_code)
  993. {
  994. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  995. mtd);
  996. unsigned long nsectors, val1, val2;
  997. int i;
  998. nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
  999. for (i = 0; i < nsectors; i++) {
  1000. /* Read hw-computed remainder */
  1001. val1 = readl(info->reg.gpmc_bch_result0[i]);
  1002. val2 = readl(info->reg.gpmc_bch_result1[i]);
  1003. /*
  1004. * Add constant polynomial to remainder, in order to get an ecc
  1005. * sequence of 0xFFs for a buffer filled with 0xFFs; and
  1006. * left-justify the resulting polynomial.
  1007. */
  1008. *ecc_code++ = 0x28 ^ ((val2 >> 12) & 0xFF);
  1009. *ecc_code++ = 0x13 ^ ((val2 >> 4) & 0xFF);
  1010. *ecc_code++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF));
  1011. *ecc_code++ = 0x39 ^ ((val1 >> 20) & 0xFF);
  1012. *ecc_code++ = 0x96 ^ ((val1 >> 12) & 0xFF);
  1013. *ecc_code++ = 0xac ^ ((val1 >> 4) & 0xFF);
  1014. *ecc_code++ = 0x7f ^ ((val1 & 0xF) << 4);
  1015. }
  1016. return 0;
  1017. }
  1018. /**
  1019. * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
  1020. * @mtd: MTD device structure
  1021. * @dat: The pointer to data on which ecc is computed
  1022. * @ecc_code: The ecc_code buffer
  1023. */
  1024. static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
  1025. u_char *ecc_code)
  1026. {
  1027. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  1028. mtd);
  1029. unsigned long nsectors, val1, val2, val3, val4;
  1030. int i;
  1031. nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
  1032. for (i = 0; i < nsectors; i++) {
  1033. /* Read hw-computed remainder */
  1034. val1 = readl(info->reg.gpmc_bch_result0[i]);
  1035. val2 = readl(info->reg.gpmc_bch_result1[i]);
  1036. val3 = readl(info->reg.gpmc_bch_result2[i]);
  1037. val4 = readl(info->reg.gpmc_bch_result3[i]);
  1038. /*
  1039. * Add constant polynomial to remainder, in order to get an ecc
  1040. * sequence of 0xFFs for a buffer filled with 0xFFs.
  1041. */
  1042. *ecc_code++ = 0xef ^ (val4 & 0xFF);
  1043. *ecc_code++ = 0x51 ^ ((val3 >> 24) & 0xFF);
  1044. *ecc_code++ = 0x2e ^ ((val3 >> 16) & 0xFF);
  1045. *ecc_code++ = 0x09 ^ ((val3 >> 8) & 0xFF);
  1046. *ecc_code++ = 0xed ^ (val3 & 0xFF);
  1047. *ecc_code++ = 0x93 ^ ((val2 >> 24) & 0xFF);
  1048. *ecc_code++ = 0x9a ^ ((val2 >> 16) & 0xFF);
  1049. *ecc_code++ = 0xc2 ^ ((val2 >> 8) & 0xFF);
  1050. *ecc_code++ = 0x97 ^ (val2 & 0xFF);
  1051. *ecc_code++ = 0x79 ^ ((val1 >> 24) & 0xFF);
  1052. *ecc_code++ = 0xe5 ^ ((val1 >> 16) & 0xFF);
  1053. *ecc_code++ = 0x24 ^ ((val1 >> 8) & 0xFF);
  1054. *ecc_code++ = 0xb5 ^ (val1 & 0xFF);
  1055. }
  1056. return 0;
  1057. }
  1058. #endif /* CONFIG_MTD_NAND_ECC_BCH */
  1059. #ifdef CONFIG_MTD_NAND_OMAP_BCH
  1060. /**
  1061. * omap3_calculate_ecc_bch - Generate bytes of ECC bytes
  1062. * @mtd: MTD device structure
  1063. * @dat: The pointer to data on which ecc is computed
  1064. * @ecc_code: The ecc_code buffer
  1065. *
  1066. * Support calculating of BCH4/8 ecc vectors for the page
  1067. */
  1068. static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,
  1069. u_char *ecc_code)
  1070. {
  1071. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  1072. mtd);
  1073. unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
  1074. int i, eccbchtsel;
  1075. nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
  1076. /*
  1077. * find BCH scheme used
  1078. * 0 -> BCH4
  1079. * 1 -> BCH8
  1080. */
  1081. eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3);
  1082. for (i = 0; i < nsectors; i++) {
  1083. /* Read hw-computed remainder */
  1084. bch_val1 = readl(info->reg.gpmc_bch_result0[i]);
  1085. bch_val2 = readl(info->reg.gpmc_bch_result1[i]);
  1086. if (eccbchtsel) {
  1087. bch_val3 = readl(info->reg.gpmc_bch_result2[i]);
  1088. bch_val4 = readl(info->reg.gpmc_bch_result3[i]);
  1089. }
  1090. if (eccbchtsel) {
  1091. /* BCH8 ecc scheme */
  1092. *ecc_code++ = (bch_val4 & 0xFF);
  1093. *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
  1094. *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
  1095. *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
  1096. *ecc_code++ = (bch_val3 & 0xFF);
  1097. *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
  1098. *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
  1099. *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
  1100. *ecc_code++ = (bch_val2 & 0xFF);
  1101. *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
  1102. *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
  1103. *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
  1104. *ecc_code++ = (bch_val1 & 0xFF);
  1105. /*
  1106. * Setting 14th byte to zero to handle
  1107. * erased page & maintain compatibility
  1108. * with RBL
  1109. */
  1110. *ecc_code++ = 0x0;
  1111. } else {
  1112. /* BCH4 ecc scheme */
  1113. *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
  1114. *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
  1115. *ecc_code++ = ((bch_val2 & 0xF) << 4) |
  1116. ((bch_val1 >> 28) & 0xF);
  1117. *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
  1118. *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
  1119. *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
  1120. *ecc_code++ = ((bch_val1 & 0xF) << 4);
  1121. /*
  1122. * Setting 8th byte to zero to handle
  1123. * erased page
  1124. */
  1125. *ecc_code++ = 0x0;
  1126. }
  1127. }
  1128. return 0;
  1129. }
  1130. /**
  1131. * erased_sector_bitflips - count bit flips
  1132. * @data: data sector buffer
  1133. * @oob: oob buffer
  1134. * @info: omap_nand_info
  1135. *
  1136. * Check whether the number of bit flips in an erased page falls below the
  1137. * correctable level. If it does, report the page as erased with correctable
  1138. * bit flips; otherwise report the page as uncorrectable.
  1139. */
  1140. static int erased_sector_bitflips(u_char *data, u_char *oob,
  1141. struct omap_nand_info *info)
  1142. {
  1143. int flip_bits = 0, i;
  1144. for (i = 0; i < info->nand.ecc.size; i++) {
  1145. flip_bits += hweight8(~data[i]);
  1146. if (flip_bits > info->nand.ecc.strength)
  1147. return 0;
  1148. }
  1149. for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
  1150. flip_bits += hweight8(~oob[i]);
  1151. if (flip_bits > info->nand.ecc.strength)
  1152. return 0;
  1153. }
  1154. /*
  1155. * Bit flips fall within the correctable level.
  1156. * Fill data area with 0xFF
  1157. */
  1158. if (flip_bits) {
  1159. memset(data, 0xFF, info->nand.ecc.size);
  1160. memset(oob, 0xFF, info->nand.ecc.bytes);
  1161. }
  1162. return flip_bits;
  1163. }
  1164. /**
  1165. * omap_elm_correct_data - corrects page data area in case error reported
  1166. * @mtd: MTD device structure
  1167. * @data: page data
  1168. * @read_ecc: ecc read from nand flash
  1169. * @calc_ecc: ecc read from HW ECC registers
  1170. *
  1171. * The calculated ecc vector is reported as zero for error-free pages.
  1172. * For error/erased pages a non-zero error vector is reported.
  1173. * In case of a non-zero ecc vector, check read_ecc at a fixed offset
  1174. * (x = 13 for BCH8, x = 7 for BCH4) to find out whether the page is programmed.
  1175. * To handle bit flips in this data, count the number of 0's in
  1176. * read_ecc[x] and check whether it is greater than 4. If it is less, the
  1177. * page is programmed, else it is erased.
  1178. *
  1179. * 1. If page is erased, check with standard ecc vector (ecc vector
  1180. * for erased page to find any bit flip). If check fails, bit flip
  1181. * is present in erased page. Count the bit flips in erased page and
  1182. * if it falls under correctable level, report page with 0xFF and
  1183. * update the correctable bit information.
  1184. * 2. If error is reported on programmed page, update elm error
  1185. * vector and correct the page with ELM error correction routine.
  1186. *
  1187. */
  1188. static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
  1189. u_char *read_ecc, u_char *calc_ecc)
  1190. {
  1191. struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
  1192. mtd);
  1193. int eccsteps = info->nand.ecc.steps;
  1194. int i, j, stat = 0;
  1195. int eccsize, eccflag, ecc_vector_size;
  1196. struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
  1197. u_char *ecc_vec = calc_ecc;
  1198. u_char *spare_ecc = read_ecc;
  1199. u_char *erased_ecc_vec;
  1200. enum bch_ecc type;
  1201. bool is_error_reported = false;
  1202. /* Initialize elm error vector to zero */
  1203. memset(err_vec, 0, sizeof(err_vec));
  1204. if (info->nand.ecc.strength == BCH8_MAX_ERROR) {
  1205. type = BCH8_ECC;
  1206. erased_ecc_vec = bch8_vector;
  1207. } else {
  1208. type = BCH4_ECC;
  1209. erased_ecc_vec = bch4_vector;
  1210. }
  1211. ecc_vector_size = info->nand.ecc.bytes;
  1212. /*
  1213. * Remove extra byte padding for BCH8 RBL
  1214. * compatibility and erased page handling
  1215. */
  1216. eccsize = ecc_vector_size - 1;
  1217. for (i = 0; i < eccsteps ; i++) {
  1218. eccflag = 0; /* initialize eccflag */
  1219. /*
  1220. * Check any error reported,
  1221. * In case of error, non zero ecc reported.
  1222. */
  1223. for (j = 0; (j < eccsize); j++) {
  1224. if (calc_ecc[j] != 0) {
  1225. eccflag = 1; /* non zero ecc, error present */
  1226. break;
  1227. }
  1228. }
  1229. if (eccflag == 1) {
  1230. /*
  1231. * Set the threshold to the minimum of 4 and ecc.strength / 2,
  1232. * limiting the maximum number of bit flips allowed in the byte to 4
  1233. */
  1234. unsigned int threshold = min_t(unsigned int, 4,
  1235. info->nand.ecc.strength / 2);
  1236. /*
  1237. * Check whether the data area is programmed by counting the
  1238. * number of 0 bits at a fixed offset in the spare area and
  1239. * comparing that count against the threshold:
  1240. * a programmed page is expected to have at least 'threshold'
  1241. * zeros in that byte.
  1242. * If a programmed page has fewer zeros than the threshold, or an
  1243. * erased page has more, either case is reported as an
  1244. * uncorrectable page.
  1245. */
  1246. if (hweight8(~read_ecc[eccsize]) >= threshold) {
  1247. /*
  1248. * Update elm error vector as
  1249. * data area is programmed
  1250. */
  1251. err_vec[i].error_reported = true;
  1252. is_error_reported = true;
  1253. } else {
  1254. /* Error reported in erased page */
  1255. int bitflip_count;
  1256. u_char *buf = &data[info->nand.ecc.size * i];
  1257. if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) {
  1258. bitflip_count = erased_sector_bitflips(
  1259. buf, read_ecc, info);
  1260. if (bitflip_count)
  1261. stat += bitflip_count;
  1262. else
  1263. return -EINVAL;
  1264. }
  1265. }
  1266. }
  1267. /* Update the ecc vector */
  1268. calc_ecc += ecc_vector_size;
  1269. read_ecc += ecc_vector_size;
  1270. }
  1271. /* Check if any error reported */
  1272. if (!is_error_reported)
  1273. return 0;
  1274. /* Decode BCH error using ELM module */
  1275. elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
  1276. for (i = 0; i < eccsteps; i++) {
  1277. if (err_vec[i].error_reported) {
  1278. for (j = 0; j < err_vec[i].error_count; j++) {
  1279. u32 bit_pos, byte_pos, error_max, pos;
  1280. if (type == BCH8_ECC)
  1281. error_max = BCH8_ECC_MAX;
  1282. else
  1283. error_max = BCH4_ECC_MAX;
  1284. if (info->nand.ecc.strength == BCH8_MAX_ERROR)
  1285. pos = err_vec[i].error_loc[j];
  1286. else
  1287. /* Add 4 to account for the 4-bit padding */
  1288. pos = err_vec[i].error_loc[j] +
  1289. BCH4_BIT_PAD;
  1290. /* Calculate bit position of error */
  1291. bit_pos = pos % 8;
  1292. /* Calculate byte position of error */
  1293. byte_pos = (error_max - pos - 1) / 8;
				if (pos < error_max) {
					if (byte_pos < 512)
						data[byte_pos] ^= 1 << bit_pos;
					else
						spare_ecc[byte_pos - 512] ^=
							1 << bit_pos;
				}
				/*
				 * Positions at or beyond error_max fall
				 * outside the data/ECC area and are not
				 * corrected.
				 */
			}
		}

		/* Update the number of corrected errors */
		stat += err_vec[i].error_count;

		/* Advance the data pointer by one sector */
		data += info->nand.ecc.size;
		spare_ecc += ecc_vector_size;
	}

	for (i = 0; i < eccsteps; i++)
		/* Return an error if an uncorrectable error is present */
		if (err_vec[i].error_uncorrectable)
			return -EINVAL;

	return stat;
}
#endif /* CONFIG_MTD_NAND_OMAP_BCH */
#ifdef CONFIG_MTD_NAND_ECC_BCH

/**
 * omap3_correct_data_bch - Decode received data and correct errors
 * @mtd: MTD device structure
 * @data: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 */
static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
				  u_char *read_ecc, u_char *calc_ecc)
{
	int i, count;
	/* cannot correct more than 8 errors */
	unsigned int errloc[8];
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
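
	/*
	 * decode_bch() from lib/bch.c is called with a NULL data pointer, so
	 * it works directly from the ECC bytes read from flash and the ECC
	 * calculated by the GPMC engine; it returns the number of detected
	 * bit flips, or a negative value if the sector is uncorrectable.
	 */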
	count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
			   errloc);
	if (count > 0) {
		/* correct errors */
		for (i = 0; i < count; i++) {
			/* correct data only, not ecc bytes */
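			/*
			 * Illustrative mapping: errloc[i] == 4095 flips bit 7
			 * of data byte 511; locations >= 8 * 512 lie in the
			 * ECC bytes themselves and are left untouched.
			 */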
			if (errloc[i] < 8 * 512)
				data[errloc[i] / 8] ^= 1 << (errloc[i] & 7);
			pr_debug("corrected bitflip %u\n", errloc[i]);
		}
	} else if (count < 0) {
		pr_err("ecc unrecoverable error\n");
	}

	return count;
}
#endif /* CONFIG_MTD_NAND_ECC_BCH */

#ifdef CONFIG_MTD_NAND_OMAP_BCH

/**
 * omap_write_page_bch - BCH ecc based write page function for entire page
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 *
 * Custom write-page method that supports writing all sectors of a page in
 * one shot.
 */
static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
			       const uint8_t *buf, int oob_required)
{
	int i;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint32_t *eccpos = chip->ecc.layout->eccpos;

	/* Enable GPMC ecc engine */
	chip->ecc.hwctl(mtd, NAND_ECC_WRITE);

	/* Write data */
	chip->write_buf(mtd, buf, mtd->writesize);

	/* Update ecc vector from GPMC result registers */
	chip->ecc.calculate(mtd, buf, &ecc_calc[0]);

	for (i = 0; i < chip->ecc.total; i++)
		chip->oob_poi[eccpos[i]] = ecc_calc[i];

	/* Write ecc vector to OOB area */
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
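
/*
 * Note: omap_write_page_bch() and omap_read_page_bch() below are installed
 * as chip->ecc.write_page and chip->ecc.read_page in omap_nand_probe() for
 * the OMAP_ECC_BCH4_CODE_HW and OMAP_ECC_BCH8_CODE_HW schemes, which rely on
 * the ELM block for error location.
 */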

/**
 * omap_read_page_bch - BCH ecc based page read function for entire page
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * For the BCH ecc scheme, the GPMC is used for syndrome calculation and the
 * ELM module is used for error correction.
 * This custom method supports ELM-based correction and multi-sector reads:
 * the page data area is read together with the OOB data while the ecc engine
 * is enabled, and the ecc vector is updated after the OOB data has been read.
 * For error-free pages the ecc vector is reported as zero.
 */
static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
			      uint8_t *buf, int oob_required, int page)
{
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	uint8_t *oob = &chip->oob_poi[eccpos[0]];
	uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
	int stat;
	unsigned int max_bitflips = 0;
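
	/*
	 * Read flow: with the GPMC ecc engine enabled, read the whole data
	 * area, then use NAND_CMD_RNDOUT to jump to the first ECC byte in the
	 * OOB area (oob_pos) and read the stored ECC as well, so that the
	 * engine's syndrome covers both; finally hand data, stored ECC and
	 * calculated ECC to chip->ecc.correct().
	 */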

	/* Enable GPMC ecc engine */
	chip->ecc.hwctl(mtd, NAND_ECC_READ);

	/* Read data */
	chip->read_buf(mtd, buf, mtd->writesize);

	/* Read oob bytes */
	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
	chip->read_buf(mtd, oob, chip->ecc.total);

	/* Calculate ecc bytes */
	chip->ecc.calculate(mtd, buf, ecc_calc);

	memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);

	stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);

	if (stat < 0) {
		mtd->ecc_stats.failed++;
	} else {
		mtd->ecc_stats.corrected += stat;
		max_bitflips = max_t(unsigned int, max_bitflips, stat);
	}

	return max_bitflips;
}

/**
 * is_elm_present - checks for presence of ELM module by scanning DT nodes
 * @info: NAND device structure containing platform data
 * @elm_node: ELM device tree node
 * @bch_type: 0x0=BCH4, 0x1=BCH8, 0x2=BCH16
 */
static int is_elm_present(struct omap_nand_info *info,
			  struct device_node *elm_node, enum bch_ecc bch_type)
{
	struct platform_device *pdev;

	info->is_elm_used = false;

	/* check whether elm-id is passed via DT */
	if (!elm_node) {
		pr_err("nand: error: ELM DT node not found\n");
		return -ENODEV;
	}

	pdev = of_find_device_by_node(elm_node);

	/* check whether ELM device is registered */
	if (!pdev) {
		pr_err("nand: error: ELM device not found\n");
		return -ENODEV;
	}

	/* ELM module available, now configure it */
	info->elm_dev = &pdev->dev;
	if (elm_config(info->elm_dev, bch_type))
		return -ENODEV;

	info->is_elm_used = true;
	return 0;
}
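
/*
 * is_elm_present() is called from omap_nand_probe() for the BCH4/BCH8
 * hardware ECC schemes; on success info->elm_dev is later used by
 * omap_elm_correct_data() via elm_decode_bch_error_page().
 */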
#endif /* CONFIG_MTD_NAND_OMAP_BCH */

#ifdef CONFIG_MTD_NAND_ECC_BCH

/**
 * omap3_free_bch - Release BCH ecc resources
 * @mtd: MTD device structure
 */
static void omap3_free_bch(struct mtd_info *mtd)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	if (info->bch) {
		free_bch(info->bch);
		info->bch = NULL;
	}
}

/**
 * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
 * @mtd: MTD device structure
 */
static int omap3_init_bch_tail(struct mtd_info *mtd)
{
	int i, steps, offset;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	struct nand_ecclayout *layout = &info->ecclayout;

	/* build oob layout */
	steps = mtd->writesize / info->nand.ecc.size;
	layout->eccbytes = steps * info->nand.ecc.bytes;

	/* do not bother creating special oob layouts for small page devices */
	if (mtd->oobsize < 64) {
		pr_err("BCH ecc is not supported on small page devices\n");
		goto fail;
	}

	/* reserve 2 bytes for bad block marker */
	if (layout->eccbytes + 2 > mtd->oobsize) {
		pr_err("no oob layout available for oobsize %d eccbytes %u\n",
		       mtd->oobsize, layout->eccbytes);
		goto fail;
	}

	/* ECC layout compatible with the RBL for BCH8 */
	if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
		offset = 2;
	else
		offset = mtd->oobsize - layout->eccbytes;

	/* put ecc bytes at oob tail */
	for (i = 0; i < layout->eccbytes; i++)
		layout->eccpos[i] = offset + i;

	if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
		layout->oobfree[0].offset = 2 + layout->eccbytes * steps;
	else
		layout->oobfree[0].offset = 2;

	layout->oobfree[0].length = mtd->oobsize - 2 - layout->eccbytes;
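
	/*
	 * Example layout (illustrative, assuming a device with 2KiB pages and
	 * a 64-byte OOB using the software-detection BCH4 scheme, ecc.bytes
	 * == 7): steps = 4, eccbytes = 28, offset = 64 - 28 = 36, so eccpos
	 * covers bytes 36..63 and oobfree is {offset = 2, length = 34}.
	 */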
	info->nand.ecc.layout = layout;
	if (!(info->nand.options & NAND_BUSWIDTH_16))
		info->nand.badblock_pattern = &bb_descrip_flashbased;
	return 0;
fail:
	omap3_free_bch(mtd);
	return -1;
}
#else
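/* Stub implementations used when CONFIG_MTD_NAND_ECC_BCH is disabled */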
static int omap3_init_bch_tail(struct mtd_info *mtd)
{
	return -1;
}

static void omap3_free_bch(struct mtd_info *mtd)
{
}
#endif /* CONFIG_MTD_NAND_ECC_BCH */

static int omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info *info;
	struct omap_nand_platform_data *pdata;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	int err;
	int i, offset;
	dma_cap_mask_t mask;
	unsigned sig;
	struct resource *res;
	struct mtd_part_parser_data ppdata = {};

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->pdev = pdev;
	info->gpmc_cs = pdata->cs;
	info->reg = pdata->reg;
	info->bch = NULL;
	info->of_node = pdata->of_node;

	mtd = &info->mtd;
	mtd->priv = &info->nand;
	mtd->name = dev_name(&pdev->dev);
	mtd->owner = THIS_MODULE;

	nand_chip = &info->nand;
	nand_chip->options |= NAND_SKIP_BBTSCAN;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		err = -EINVAL;
		dev_err(&pdev->dev, "error getting memory resource\n");
		goto out_free_info;
	}

	info->phys_base = res->start;
	info->mem_size = resource_size(res);

	if (!request_mem_region(info->phys_base, info->mem_size,
				pdev->dev.driver->name)) {
		err = -EBUSY;
		goto out_free_info;
	}

	nand_chip->IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
	if (!nand_chip->IO_ADDR_R) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}

	nand_chip->controller = &info->controller;

	nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
	nand_chip->cmd_ctrl = omap_hwcontrol;

	/*
	 * If the RDY/BSY line is connected to the OMAP, use the omap ready
	 * function and the generic nand_wait function, which reads the status
	 * register after monitoring the RDY/BSY line. Otherwise use a fixed
	 * chip delay slightly longer than the device's tR (AC timing) and
	 * poll the status register until success or failure is reported.
	 */
	if (pdata->dev_ready) {
		nand_chip->dev_ready = omap_dev_ready;
		nand_chip->chip_delay = 0;
	} else {
		nand_chip->waitfunc = omap_wait;
		nand_chip->chip_delay = 50;
	}

	/* scan NAND device connected to chip controller */
	nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16;
	if (nand_scan_ident(mtd, 1, NULL)) {
		pr_err("nand device scan failed, may be bus-width mismatch\n");
		err = -ENXIO;
		goto out_release_mem_region;
	}

	/* re-populate low-level callbacks based on xfer modes */
	switch (pdata->xfer_type) {
	case NAND_OMAP_PREFETCH_POLLED:
		nand_chip->read_buf = omap_read_buf_pref;
		nand_chip->write_buf = omap_write_buf_pref;
		break;

	case NAND_OMAP_POLLED:
		if (nand_chip->options & NAND_BUSWIDTH_16) {
			nand_chip->read_buf = omap_read_buf16;
			nand_chip->write_buf = omap_write_buf16;
		} else {
			nand_chip->read_buf = omap_read_buf8;
			nand_chip->write_buf = omap_write_buf8;
		}
		break;

	case NAND_OMAP_PREFETCH_DMA:
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		sig = OMAP24XX_DMA_GPMC;
		info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
		if (!info->dma) {
			dev_err(&pdev->dev, "DMA engine request failed\n");
			err = -ENXIO;
			goto out_release_mem_region;
		} else {
			struct dma_slave_config cfg;

			memset(&cfg, 0, sizeof(cfg));
			cfg.src_addr = info->phys_base;
			cfg.dst_addr = info->phys_base;
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			cfg.src_maxburst = 16;
			cfg.dst_maxburst = 16;
			err = dmaengine_slave_config(info->dma, &cfg);
			if (err) {
				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
					err);
				goto out_release_mem_region;
			}
			nand_chip->read_buf = omap_read_buf_dma_pref;
			nand_chip->write_buf = omap_write_buf_dma_pref;
		}
		break;

	case NAND_OMAP_PREFETCH_IRQ:
		info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
		if (info->gpmc_irq_fifo <= 0) {
			dev_err(&pdev->dev, "error getting fifo irq\n");
			err = -ENODEV;
			goto out_release_mem_region;
		}
		err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
				  IRQF_SHARED, "gpmc-nand-fifo", info);
		if (err) {
			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
				info->gpmc_irq_fifo, err);
			info->gpmc_irq_fifo = 0;
			goto out_release_mem_region;
		}

		info->gpmc_irq_count = platform_get_irq(pdev, 1);
		if (info->gpmc_irq_count <= 0) {
			dev_err(&pdev->dev, "error getting count irq\n");
			err = -ENODEV;
			goto out_release_mem_region;
		}
		err = request_irq(info->gpmc_irq_count, omap_nand_irq,
				  IRQF_SHARED, "gpmc-nand-count", info);
		if (err) {
			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
				info->gpmc_irq_count, err);
			info->gpmc_irq_count = 0;
			goto out_release_mem_region;
		}

		nand_chip->read_buf = omap_read_buf_irq_pref;
		nand_chip->write_buf = omap_write_buf_irq_pref;
		break;

	default:
		dev_err(&pdev->dev,
			"xfer_type(%d) not supported!\n", pdata->xfer_type);
		err = -EINVAL;
		goto out_release_mem_region;
	}

	/* populate MTD interface based on ECC scheme */
	switch (pdata->ecc_opt) {
	case OMAP_ECC_HAM1_CODE_HW:
		pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
		nand_chip->ecc.mode = NAND_ECC_HW;
		nand_chip->ecc.bytes = 3;
		nand_chip->ecc.size = 512;
		nand_chip->ecc.strength = 1;
		nand_chip->ecc.calculate = omap_calculate_ecc;
		nand_chip->ecc.hwctl = omap_enable_hwecc;
		nand_chip->ecc.correct = omap_correct_data;
		break;

	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
#ifdef CONFIG_MTD_NAND_ECC_BCH
		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
		nand_chip->ecc.mode = NAND_ECC_HW;
		nand_chip->ecc.size = 512;
		nand_chip->ecc.bytes = 7;
		nand_chip->ecc.strength = 4;
		nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
		nand_chip->ecc.correct = omap3_correct_data_bch;
		nand_chip->ecc.calculate = omap3_calculate_ecc_bch4;
		/* software bch library is used for locating errors */
		info->bch = init_bch(nand_chip->ecc.bytes,
				     nand_chip->ecc.strength,
				     OMAP_ECC_BCH8_POLYNOMIAL);
		if (!info->bch) {
			pr_err("nand: error: unable to use s/w BCH library\n");
			err = -EINVAL;
			goto out_release_mem_region;
		}
		break;
#else
		pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n");
		err = -EINVAL;
		goto out_release_mem_region;
#endif

	case OMAP_ECC_BCH4_CODE_HW:
#ifdef CONFIG_MTD_NAND_OMAP_BCH
		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
		nand_chip->ecc.mode = NAND_ECC_HW;
		nand_chip->ecc.size = 512;
		/* an extra padding byte is reserved for ROM-code compatibility */
		nand_chip->ecc.bytes = 7 + 1;
		nand_chip->ecc.strength = 4;
		nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
		nand_chip->ecc.correct = omap_elm_correct_data;
		nand_chip->ecc.calculate = omap3_calculate_ecc_bch;
		nand_chip->ecc.read_page = omap_read_page_bch;
		nand_chip->ecc.write_page = omap_write_page_bch;
		/* This ECC scheme requires ELM H/W block */
		if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
			pr_err("nand: error: could not initialize ELM\n");
			err = -ENODEV;
			goto out_release_mem_region;
		}
		break;
#else
		pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
		err = -EINVAL;
		goto out_release_mem_region;
#endif

	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
#ifdef CONFIG_MTD_NAND_ECC_BCH
		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
		nand_chip->ecc.mode = NAND_ECC_HW;
		nand_chip->ecc.size = 512;
		nand_chip->ecc.bytes = 13;
		nand_chip->ecc.strength = 8;
		nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
		nand_chip->ecc.correct = omap3_correct_data_bch;
		nand_chip->ecc.calculate = omap3_calculate_ecc_bch8;
		/* software bch library is used for locating errors */
		info->bch = init_bch(nand_chip->ecc.bytes,
				     nand_chip->ecc.strength,
				     OMAP_ECC_BCH8_POLYNOMIAL);
		if (!info->bch) {
			pr_err("nand: error: unable to use s/w BCH library\n");
			err = -EINVAL;
			goto out_release_mem_region;
		}
		break;
#else
		pr_err("nand: error: CONFIG_MTD_NAND_ECC_BCH not enabled\n");
		err = -EINVAL;
		goto out_release_mem_region;
#endif

	case OMAP_ECC_BCH8_CODE_HW:
#ifdef CONFIG_MTD_NAND_OMAP_BCH
		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
		nand_chip->ecc.mode = NAND_ECC_HW;
		nand_chip->ecc.size = 512;
		/* the 14th byte is kept as padding for ROM-code compatibility */
		nand_chip->ecc.bytes = 13 + 1;
		nand_chip->ecc.strength = 8;
		nand_chip->ecc.hwctl = omap3_enable_hwecc_bch;
		nand_chip->ecc.correct = omap_elm_correct_data;
		nand_chip->ecc.calculate = omap3_calculate_ecc_bch;
		nand_chip->ecc.read_page = omap_read_page_bch;
		nand_chip->ecc.write_page = omap_write_page_bch;
		/* This ECC scheme requires ELM H/W block */
		if (is_elm_present(info, pdata->elm_of_node, BCH8_ECC) < 0) {
			pr_err("nand: error: could not initialize ELM\n");
			err = -ENODEV;
			goto out_release_mem_region;
		}
		break;
#else
		pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
		err = -EINVAL;
		goto out_release_mem_region;
#endif

	default:
		pr_err("nand: error: invalid or unsupported ECC scheme\n");
		err = -EINVAL;
		goto out_release_mem_region;
	}

	/* rom code layout */
	if (pdata->ecc_opt == OMAP_ECC_HAM1_CODE_HW) {
		if (nand_chip->options & NAND_BUSWIDTH_16) {
			offset = 2;
		} else {
			offset = 1;
			nand_chip->badblock_pattern = &bb_descrip_flashbased;
		}
		omap_oobinfo.eccbytes = 3 * (mtd->writesize / 512);
		for (i = 0; i < omap_oobinfo.eccbytes; i++)
			omap_oobinfo.eccpos[i] = i + offset;

		omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
		omap_oobinfo.oobfree->length = mtd->oobsize -
					(offset + omap_oobinfo.eccbytes);
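
		/*
		 * Illustrative result (assuming a 16-bit device with 2KiB
		 * pages and a 64-byte OOB): eccbytes = 12, eccpos = 2..13,
		 * oobfree = {offset = 14, length = 50}.
		 */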
		nand_chip->ecc.layout = &omap_oobinfo;
	} else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
		   (pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) ||
		   (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) ||
		   (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
		/* build OOB layout for BCH ECC correction */
		err = omap3_init_bch_tail(mtd);
		if (err) {
			err = -EINVAL;
			goto out_release_mem_region;
		}
	}

	/* second phase scan */
	if (nand_scan_tail(mtd)) {
		err = -ENXIO;
		goto out_release_mem_region;
	}

	ppdata.of_node = pdata->of_node;
	mtd_device_parse_register(mtd, NULL, &ppdata, pdata->parts,
				  pdata->nr_parts);

	platform_set_drvdata(pdev, mtd);

	return 0;

out_release_mem_region:
	if (info->dma)
		dma_release_channel(info->dma);
	if (info->gpmc_irq_count > 0)
		free_irq(info->gpmc_irq_count, info);
	if (info->gpmc_irq_fifo > 0)
		free_irq(info->gpmc_irq_fifo, info);
	release_mem_region(info->phys_base, info->mem_size);
out_free_info:
	omap3_free_bch(mtd);
	kfree(info);

	return err;
}

static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct nand_chip *nand_chip = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	omap3_free_bch(mtd);

	if (info->dma)
		dma_release_channel(info->dma);

	if (info->gpmc_irq_count > 0)
		free_irq(info->gpmc_irq_count, info);
	if (info->gpmc_irq_fifo > 0)
		free_irq(info->gpmc_irq_fifo, info);

	/* Release NAND device, its internal structures and partitions */
	nand_release(mtd);
	iounmap(nand_chip->IO_ADDR_R);
	release_mem_region(info->phys_base, info->mem_size);
	kfree(info);
	return 0;
}

static struct platform_driver omap_nand_driver = {
	.probe = omap_nand_probe,
	.remove = omap_nand_remove,
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
	},
};

module_platform_driver(omap_nand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");