/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/mach/flash.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
#include <mach/gpio.h>

#include <plat/dma.h>

#include <plat/board.h>

#define DRIVER_NAME		"omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)

struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int freq);
};

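/*
 * Completion callbacks: the DMA and GPIO interrupt handlers simply wake
 * up whoever is waiting in omap2_onenand_wait() or in the BufferRAM
 * transfer routines below.
 */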
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

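/*
 * Wait for a pending OneNAND command to finish.  Resets are polled with
 * a short busy loop, reads poll the interrupt register with interrupts
 * masked, and all other states sleep on the GPIO interrupt completion.
 * Controller and ECC status are checked once the wait is over.
 */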
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING) {
		int i;

		for (i = 0; i < 20; i++) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if (!(intr & ONENAND_INT_RESET)) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

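/*
 * Return the offset of the currently selected BufferRAM bank for the
 * given area: bank 0 starts at offset 0, bank 1 starts one page
 * (DataRAM) or one OOB area (SpareRAM) further in.
 */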
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

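/*
 * OMAP3 BufferRAM access: large, word-aligned transfers go through the
 * system DMA engine; small, unaligned or atomic-context transfers fall
 * back to memcpy.
 */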
#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);
int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

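/*
 * OMAP2 BufferRAM access: a DMA path is present but currently disabled
 * (note the "if (1 || ...)" checks below), so all transfers go through
 * memcpy until the PM requirements have been revisited.
 */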
#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);
int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

static struct platform_driver omap2_onenand_driver;

static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

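/*
 * Probe: request the GPMC chip select and I/O region, map the OneNAND
 * window, run the board-specific setup hook, optionally claim the GPIO
 * interrupt and a DMA channel, then scan the chip and register the MTD
 * device (and partitions, if provided by platform data).
 */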
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}

#ifdef CONFIG_MTD_PARTITIONS
	if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts,
				       pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r < 0)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}

static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
	if (c->parts)
		del_mtd_partitions(&c->mtd);
	else
		del_mtd_device(&c->mtd);
#else
	del_mtd_device(&c->mtd);
#endif
	onenand_release(&c->mtd);

	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	gpmc_cs_free(c->gpmc_cs);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");

	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");