/*
 *  linux/drivers/mtd/onenand/omap2.c
 *
 *  OneNAND driver for OMAP2 / OMAP3
 *
 *  Copyright © 2005-2006 Nokia Corporation
 *
 *  Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 *  IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/mach/flash.h>
#include <asm/arch/gpmc.h>
#include <asm/arch/onenand.h>
#include <asm/arch/gpio.h>
#include <asm/arch/pm.h>
#include <asm/arch/dma.h>
#include <asm/arch/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)
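
/*
 * Per-device state, allocated in probe.  One instance ties together the MTD
 * and OneNAND chip structures, the GPMC chip select and its mapped I/O
 * window, the optional GPIO interrupt line and DMA channel, and the
 * completions used to wait for interrupt and DMA events.
 */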
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int freq);
};
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}
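
/*
 * Wait function installed as the chip's ->wait hook when IRQ/DMA mode is
 * enabled in probe.  Reset is handled by briefly polling the interrupt
 * register; reads poll with the interrupt output disabled; everything else
 * enables the interrupt output and sleeps on irq_done with a 20 ms timeout,
 * retried up to three times while the controller still reports an ongoing
 * operation.  ECC and controller error bits are checked before returning.
 */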
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING) {
		int i;

		for (i = 0; i < 20; i++) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if (!(intr & ONENAND_INT_RESET)) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = omap_get_gpio_datain(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						     msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
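
/*
 * The chip exposes two BufferRAM banks.  When the second bank is selected,
 * the data and spare areas live one writesize/oobsize further into the
 * mapped window, so the bufferram accessors below add this offset.
 */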
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return mtd->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}
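
/*
 * OMAP3 bufferram accessors: transfer through the system DMA controller
 * when the buffer is word-aligned and at least 384 bytes long, falling back
 * to memcpy() otherwise (including highmem buffers that span a page
 * boundary and any DMA mapping failure).  DMA completion is busy-polled
 * with a 20 ms limit rather than slept on.
 */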
#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}
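
/*
 * Write-side counterpart of the read path above.  panic_write() may call
 * this from interrupt context, in which case DMA is skipped and the data
 * is copied by the CPU instead.
 */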
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	/* Check (and later unmap) the mapped source buffer, not the
	 * device-side physical destination address. */
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}
#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif
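
/*
 * OMAP2 bufferram accessors.  The DMA path below is kept for reference but
 * is currently disabled by the "if (1 || ...)" guard, so all transfers go
 * through memcpy() until the PM implications of using DMA here have been
 * revisited (see the comment inside each function).
 */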
#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	/* Check (and later unmap) the mapped source buffer, not the
	 * device-side physical destination address. */
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}
#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif
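
/*
 * Timing re-adjustment hook: omap2_onenand_rephase() re-runs the platform's
 * onenand_setup() callback (saved as c->setup in probe) for every device
 * bound to this driver, so the GPMC timings can be re-applied without
 * reprobing.
 */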
static struct platform_driver omap2_onenand_driver;

static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}
static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
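
/*
 * Probe: request the GPMC chip select and I/O window, run the optional
 * platform setup hook, claim the GPIO interrupt and a DMA channel if the
 * platform data asks for them, scan the chip with onenand_scan(), derive
 * the interface frequency from the chip's version ID, and finally register
 * the MTD (with partitions when provided).  Errors unwind in reverse order
 * through the err_* labels.
 */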
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = omap_request_gpio(c->gpio_irq)) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		omap_set_gpio_direction(c->gpio_irq, 1);

		if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	c->mtd.name = pdev->dev.bus_id;
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}

#ifdef CONFIG_MTD_PARTITIONS
	if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts,
				       pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r < 0)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		omap_free_gpio(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}
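
/*
 * Remove: tear down in roughly the reverse order of probe, and also clear
 * the BufferRAM via omap2_onenand_shutdown() so the boot ROM does not
 * misidentify the chip on the next boot.
 */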
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
	if (c->parts)
		del_mtd_partitions(&c->mtd);
	else
		del_mtd_device(&c->mtd);
#else
	del_mtd_device(&c->mtd);
#endif

	onenand_release(&c->mtd);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
		omap_free_gpio(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	kfree(c);

	return 0;
}
static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= omap2_onenand_remove,
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");
	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
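
/*
 * Illustrative only (not part of this driver): a board file would normally
 * provide struct omap_onenand_platform_data and register a matching
 * platform device along the lines sketched below.  The field names are the
 * ones this driver reads from its platform data, and the device name must
 * match DRIVER_NAME ("omap2-onenand"); the exact values, the setup helper
 * and the registration style are board-specific assumptions.
 *
 *	static struct omap_onenand_platform_data board_onenand_data = {
 *		.cs		= 0,
 *		.gpio_irq	= 65,		// hypothetical GPIO line
 *		.dma_channel	= -1,		// -1: PIO only, no DMA/IRQ
 *		.onenand_setup	= board_onenand_setup,
 *		.parts		= board_onenand_partitions,
 *		.nr_parts	= ARRAY_SIZE(board_onenand_partitions),
 *	};
 *
 *	static struct platform_device board_onenand_device = {
 *		.name		= "omap2-onenand",
 *		.id		= -1,
 *		.dev		= {
 *			.platform_data = &board_onenand_data,
 *		},
 *	};
 *
 *	platform_device_register(&board_onenand_device);
 */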