omap2.c

/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/nand.h>

#define	DRIVER_NAME	"omap2-nand"
#define	OMAP_NAND_TIMEOUT_MS	5000
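
/*
 * Hamming-code parity flags produced by the GPMC ECC engine:
 * NAND_Ecc_P<n>e / NAND_Ecc_P<n>o are the even / odd parity bits covering
 * <n>-bit groups of the ECC block.  The P*() helpers below place each flag
 * at the bit position it occupies in the ECC bytes assembled by
 * gen_true_ecc().
 */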
#define	NAND_Ecc_P1e		(1 << 0)
#define	NAND_Ecc_P2e		(1 << 1)
#define	NAND_Ecc_P4e		(1 << 2)
#define	NAND_Ecc_P8e		(1 << 3)
#define	NAND_Ecc_P16e		(1 << 4)
#define	NAND_Ecc_P32e		(1 << 5)
#define	NAND_Ecc_P64e		(1 << 6)
#define	NAND_Ecc_P128e		(1 << 7)
#define	NAND_Ecc_P256e		(1 << 8)
#define	NAND_Ecc_P512e		(1 << 9)
#define	NAND_Ecc_P1024e		(1 << 10)
#define	NAND_Ecc_P2048e		(1 << 11)

#define	NAND_Ecc_P1o		(1 << 16)
#define	NAND_Ecc_P2o		(1 << 17)
#define	NAND_Ecc_P4o		(1 << 18)
#define	NAND_Ecc_P8o		(1 << 19)
#define	NAND_Ecc_P16o		(1 << 20)
#define	NAND_Ecc_P32o		(1 << 21)
#define	NAND_Ecc_P64o		(1 << 22)
#define	NAND_Ecc_P128o		(1 << 23)
#define	NAND_Ecc_P256o		(1 << 24)
#define	NAND_Ecc_P512o		(1 << 25)
#define	NAND_Ecc_P1024o		(1 << 26)
#define	NAND_Ecc_P2048o		(1 << 27)

#define	TF(value)	(value ? 1 : 0)

#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

static const char *part_probes[] = { "cmdlinepart", NULL };

/* oob info generated at runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/*
 * Define some generic bad / good block scan patterns which are used
 * while scanning a device for factory marked good / bad blocks
 */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr bb_descrip_flashbased = {
	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
	.offs = 0,
	.len = 1,
	.pattern = scan_ff_pattern,
};

struct omap_nand_info {
	struct nand_hw_control controller;
	struct omap_nand_platform_data *pdata;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct nand_chip nand;
	struct platform_device *pdev;

	int gpmc_cs;
	unsigned long phys_base;
	struct completion comp;
	int dma_ch;
	int gpmc_irq;
	enum {
		OMAP_NAND_IO_READ = 0,	/* read */
		OMAP_NAND_IO_WRITE,	/* write */
	} iomode;
	u_char *buf;
	int buf_len;
};

/**
 * omap_hwcontrol - hardware specific access to control-lines
 * @mtd: MTD device structure
 * @cmd: command to device
 * @ctrl:
 * NAND_NCE: bit 0 -> don't care
 * NAND_CLE: bit 1 -> Command Latch
 * NAND_ALE: bit 2 -> Address Latch
 *
 * NOTE: boards may use different bits for these!!
 */
static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
		else if (ctrl & NAND_ALE)
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
		else /* NAND_NCE */
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
	}
}

/**
 * omap_read_buf8 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread8_rep(nand->IO_ADDR_R, buf, len);
}

/**
 * omap_write_buf8 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u_char *p = (u_char *)buf;
	u32 status = 0;

	while (len--) {
		iowrite8(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = gpmc_read_status(GPMC_STATUS_BUFFER);
		} while (!status);
	}
}

/**
 * omap_read_buf16 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
}

/**
 * omap_write_buf16 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u16 *p = (u16 *) buf;
	u32 status = 0;
	/* FIXME try bursts of writesw() or DMA ... */
	len >>= 1;

	while (len--) {
		iowrite16(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = gpmc_read_status(GPMC_STATUS_BUFFER);
		} while (!status);
	}
}

/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, (u_char *)p, len);
		else
			omap_read_buf8(mtd, (u_char *)p, len);
	} else {
		do {
			r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}

/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t w_count = 0;
	int i = 0, ret = 0;
	u16 *p = (u16 *)buf;
	unsigned long tim, limit;

	/* take care of subpage writes */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, (u_char *)p, len);
		else
			omap_write_buf8(mtd, (u_char *)p, len);
	} else {
		while (len) {
			w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for data to be flushed out before resetting the prefetch */
		tim = 0;
		limit = (loops_per_jiffy *
					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
			cpu_relax();

		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}

/*
 * omap_nand_dma_cb: callback on the completion of dma transfer
 * @lch: logical channel
 * @ch_status: channel status
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
{
	complete((struct completion *) data);
}

/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	dma_addr_t dma_addr;
	int ret;
	unsigned long tim, limit;

	/* The fifo depth is 64 bytes max.
	 * But configure the FIFO-threshold to 32 to get a sync at each frame
	 * and frame length is 32 bytes.
	 */
	int buf_len = len >> 6;
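
	/*
	 * Buffers above high_memory come from vmalloc and are not guaranteed
	 * to be physically contiguous: only a request that stays within one
	 * page can be DMA-mapped, otherwise fall back to the CPU copy path.
	 */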
	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	if (is_write) {
		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
							dma_addr, 0, 0);
		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
	} else {
		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
							dma_addr, 0, 0);
		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
	}
	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	init_completion(&info->comp);

	omap_start_dma(info->dma_ch);

	/* setup and start DMA using dma_addr */
	wait_for_completion(&info->comp);
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
		cpu_relax();

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);

	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
	return 0;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}

/**
 * omap_read_buf_dma_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_read_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, buf, len, 0x0);
}

/**
 * omap_write_buf_dma_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_dma_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_write_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}

/*
 * omap_nand_irq - GPMC irq handler
 * @this_irq: gpmc irq number
 * @dev: omap_nand_info structure pointer is passed here
 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;
	u32 irq_stat;

	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
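	/*
	 * Bit 1 of the GPMC IRQ status is the terminal-count event: the
	 * prefetch engine has finished the whole request.  Everything else
	 * is a FIFO-threshold event asking for more data to be copied.
	 */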
	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
		if (irq_stat & 0x2)
			goto done;

		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		iowrite32_rep(info->nand.IO_ADDR_W,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		ioread32_rep(info->nand.IO_ADDR_R,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;

		if (irq_stat & 0x2)
			goto done;
	}
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;

done:
	complete(&info->comp);
	/* disable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);

	/* clear status */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;
}

/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;

	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}

/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
		const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	unsigned long tim, limit;

	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for write to complete */
	wait_for_completion(&info->comp);
	/* wait for data to be flushed out before resetting the prefetch */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
		cpu_relax();

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}

/**
 * omap_verify_buf - Verify chip data against buffer
 * @mtd: MTD device structure
 * @buf: buffer containing the data to compare
 * @len: number of bytes to compare
 */
static int omap_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u16 *p = (u16 *) buf;

	len >>= 1;
	while (len--) {
		if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
			return -EFAULT;
	}

	return 0;
}

/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
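
	/*
	 * tmp now holds every parity flag from the three ECC bytes; rewrite
	 * the buffer with the (inverted) bit ordering that omap_compare_ecc()
	 * expects when computing its syndrome.
	 */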
	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}

/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECCs and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 * If there is no error, %0 is returned. If there is an error but it
 * was corrected, %1 is returned. Otherwise, %-1 is returned.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint i;
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8 ecc_bit[24];
	u8 ecc_sum = 0;
	u8 find_bit = 0;
	uint find_byte = 0;
	int isEccFF;

	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];
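
	/*
	 * For a single flipped bit in the data area exactly one bit of each
	 * of the twelve even/odd parity pairs differs, so ecc_sum == 12
	 * identifies a correctable error, and the odd-numbered syndrome bits
	 * encode the failing byte and bit position.
	 */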
	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 * ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 1;
	default:
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}

/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ecc read from nand spare area with ECC registers values
 * and if the ECCs mismatch, it will call 'omap_compare_ecc' for error
 * detection and correction. If there are no errors, %0 is returned. If
 * there were errors and all of the errors were corrected, the number of
 * corrected errors is returned. If uncorrectable errors exist, %-1 is
 * returned.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;
	int stat = 0;

	/* Ex NAND_ECC_HW12_2048 */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;
			/* keep track of the number of corrected errors */
			stat += ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat += 512;
	}
	return stat;
}

/**
 * omap_calculate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page ie. padding will clear the ECC bytes. This is no problem as long
 * as nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
}

/**
 * omap_enable_hwecc - This function enables the hardware ecc functionality
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 */
static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;

	gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
}

/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * The wait function is called during program and erase operations; given
 * the way it is called from the MTD layer, we must wait until the NAND
 * chip is ready after the program/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	gpmc_nand_write(info->gpmc_cs,
			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
	while (time_before(jiffies, timeo)) {
		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}
	return status;
}

/**
 * omap_dev_ready - calls the platform specific dev_ready function
 * @mtd: MTD device structure
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	unsigned int val = 0;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
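	/*
	 * Bit 8 of the GPMC IRQ status reflects the wait-pin (R/B#) edge
	 * detection for this chip select; treat it as the ready indication.
	 */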
	if ((val & 0x100) == 0x100) {
		/* Clear IRQ Interrupt */
		val |= 0x100;
		val &= ~(0x0);
		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
	} else {
		unsigned int cnt = 0;
		while (cnt++ < 0x1FF) {
			if ((val & 0x100) == 0x100)
				return 0;
			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
		}
	}

	return 1;
}

static int __devinit omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info *info;
	struct omap_nand_platform_data *pdata;
	int err;
	int i, offset;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->pdev = pdev;

	info->gpmc_cs = pdata->cs;
	info->phys_base = pdata->phys_base;

	info->mtd.priv = &info->nand;
	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.owner = THIS_MODULE;

	info->nand.options = pdata->devsize;
	info->nand.options |= NAND_SKIP_BBTSCAN;

	/* NAND write protect off */
	gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);

	if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
				pdev->dev.driver->name)) {
		err = -EBUSY;
		goto out_free_info;
	}

	info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
	if (!info->nand.IO_ADDR_R) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}

	info->nand.controller = &info->controller;

	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
	info->nand.cmd_ctrl = omap_hwcontrol;

	/*
	 * If the RDY/BSY line is connected to the OMAP then use the omap ready
	 * function and the generic nand_wait function which reads the status
	 * register after monitoring the RDY/BSY line. Otherwise use a standard
	 * chip delay which is slightly more than tR (AC Timing) of the NAND
	 * device and read the status register until you get a failure or success
	 */
	if (pdata->dev_ready) {
		info->nand.dev_ready = omap_dev_ready;
		info->nand.chip_delay = 0;
	} else {
		info->nand.waitfunc = omap_wait;
		info->nand.chip_delay = 50;
	}

	switch (pdata->xfer_type) {
	case NAND_OMAP_PREFETCH_POLLED:
		info->nand.read_buf = omap_read_buf_pref;
		info->nand.write_buf = omap_write_buf_pref;
		break;

	case NAND_OMAP_POLLED:
		if (info->nand.options & NAND_BUSWIDTH_16) {
			info->nand.read_buf = omap_read_buf16;
			info->nand.write_buf = omap_write_buf16;
		} else {
			info->nand.read_buf = omap_read_buf8;
			info->nand.write_buf = omap_write_buf8;
		}
		break;

	case NAND_OMAP_PREFETCH_DMA:
		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
				omap_nand_dma_cb, &info->comp, &info->dma_ch);
		if (err < 0) {
			info->dma_ch = -1;
			dev_err(&pdev->dev, "DMA request failed!\n");
			goto out_release_mem_region;
		} else {
			omap_set_dma_dest_burst_mode(info->dma_ch,
					OMAP_DMA_DATA_BURST_16);
			omap_set_dma_src_burst_mode(info->dma_ch,
					OMAP_DMA_DATA_BURST_16);

			info->nand.read_buf = omap_read_buf_dma_pref;
			info->nand.write_buf = omap_write_buf_dma_pref;
		}
		break;

	case NAND_OMAP_PREFETCH_IRQ:
		err = request_irq(pdata->gpmc_irq,
				omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
		if (err) {
			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
							pdata->gpmc_irq, err);
			goto out_release_mem_region;
		} else {
			info->gpmc_irq = pdata->gpmc_irq;
			info->nand.read_buf = omap_read_buf_irq_pref;
			info->nand.write_buf = omap_write_buf_irq_pref;
		}
		break;

	default:
		dev_err(&pdev->dev,
			"xfer_type(%d) not supported!\n", pdata->xfer_type);
		err = -EINVAL;
		goto out_release_mem_region;
	}

	info->nand.verify_buf = omap_verify_buf;

	/* select the ecc type */
	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
		info->nand.ecc.mode = NAND_ECC_SOFT;
	else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
		(pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
		info->nand.ecc.bytes = 3;
		info->nand.ecc.size = 512;
		info->nand.ecc.calculate = omap_calculate_ecc;
		info->nand.ecc.hwctl = omap_enable_hwecc;
		info->nand.ecc.correct = omap_correct_data;
		info->nand.ecc.mode = NAND_ECC_HW;
	}

	/* DIP switches on some boards change between 8 and 16 bit
	 * bus widths for flash. Try the other width if the first try fails.
	 */
	if (nand_scan_ident(&info->mtd, 1, NULL)) {
		info->nand.options ^= NAND_BUSWIDTH_16;
		if (nand_scan_ident(&info->mtd, 1, NULL)) {
			err = -ENXIO;
			goto out_release_mem_region;
		}
	}

	/* rom code layout */
	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {

		if (info->nand.options & NAND_BUSWIDTH_16)
			offset = 2;
		else {
			offset = 1;
			info->nand.badblock_pattern = &bb_descrip_flashbased;
		}
		omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
		for (i = 0; i < omap_oobinfo.eccbytes; i++)
			omap_oobinfo.eccpos[i] = i+offset;

		omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
		omap_oobinfo.oobfree->length = info->mtd.oobsize -
					(offset + omap_oobinfo.eccbytes);

		info->nand.ecc.layout = &omap_oobinfo;
	}

	/* second phase scan */
	if (nand_scan_tail(&info->mtd)) {
		err = -ENXIO;
		goto out_release_mem_region;
	}

	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
	if (err > 0)
		mtd_device_register(&info->mtd, info->parts, err);
	else if (pdata->parts)
		mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
	else
		mtd_device_register(&info->mtd, NULL, 0);

	platform_set_drvdata(pdev, &info->mtd);

	return 0;

out_release_mem_region:
	release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
	kfree(info);

	return err;
}

static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	platform_set_drvdata(pdev, NULL);
	if (info->dma_ch != -1)
		omap_free_dma(info->dma_ch);

	if (info->gpmc_irq)
		free_irq(info->gpmc_irq, info);

	/* Release NAND device, its internal structures and partitions */
	nand_release(&info->mtd);
	iounmap(info->nand.IO_ADDR_R);
	kfree(info);
	return 0;
}

static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap_nand_init(void)
{
	pr_info("%s driver initializing\n", DRIVER_NAME);

	return platform_driver_register(&omap_nand_driver);
}

static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}

module_init(omap_nand_init);
module_exit(omap_nand_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");