/*
 *  linux/drivers/mtd/onenand/omap2.c
 *
 *  OneNAND driver for OMAP2 / OMAP3
 *
 *  Copyright © 2005-2006 Nokia Corporation
 *
 *  Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 *  IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/mach/flash.h>
#include <mach/gpmc.h>
#include <mach/onenand.h>
#include <mach/gpio.h>
#include <mach/pm.h>
#include <mach/dma.h>
#include <mach/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)
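
/* Per-device state for one OneNAND chip attached to a GPMC chip select. */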
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int freq);
};
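
/*
 * Completion callbacks: the DMA callback and the OneNAND INT GPIO
 * interrupt handler simply wake up whoever is waiting on the
 * corresponding completion.
 */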
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}
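
/*
 * 16-bit accessors for the memory-mapped OneNAND register area, plus
 * helpers for logging wait errors and warnings.
 */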
static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}
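
/*
 * Wait for the current OneNAND command to finish.  Reset is polled with
 * udelay(), reads busy-poll the interrupt register with interrupts
 * disabled, and the remaining states sleep on the INT GPIO interrupt
 * completion.  ECC status is checked after reads; controller errors are
 * reported for every state.
 */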
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING) {
		int i;

		for (i = 0; i < 20; i++) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if (!(intr & ONENAND_INT_RESET)) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
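
/*
 * Return the BufferRAM offset of the currently selected buffer pair:
 * the second DataRAM/SpareRAM buffer sits one page / one OOB area
 * beyond the first.
 */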
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return mtd->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
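
/*
 * OMAP3 BufferRAM read: use DMA for large, word-aligned transfers and
 * fall back to memcpy for small (< 384 byte), misaligned or
 * atomic-context requests.  The DMA completion flag is polled with a
 * 20 ms timeout.
 */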
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}
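
/*
 * OMAP3 BufferRAM write: mirror image of the read path above, with the
 * DMA direction reversed (memory to BufferRAM).
 */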
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	/* dma_src is the mapped address, so that is what must be checked */
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	/* unmap the address that was mapped above, i.e. dma_src */
	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}
#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
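
/*
 * OMAP2 BufferRAM read: the DMA path below is kept but disabled (the
 * "if (1 || ...)" guard) until the power-management implications have
 * been revisited, so all transfers currently go through memcpy.
 */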
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}
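
/*
 * OMAP2 BufferRAM write: same structure as the read path above, again
 * with the DMA branch disabled in favour of memcpy.
 */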
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	/* dma_src is the mapped address, so that is what must be checked */
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	/* unmap the address that was mapped above, i.e. dma_src */
	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}
#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

static struct platform_driver omap2_onenand_driver;
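
/*
 * __adjust_timing() re-runs the board-supplied GPMC timing setup hook
 * for one device; omap2_onenand_rephase() applies it to every device
 * bound to this driver.
 */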
static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}
static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
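
/*
 * Probe: claim the GPMC chip select and I/O region, run the optional
 * board setup hook, set up the INT GPIO interrupt and a DMA channel
 * (falling back to PIO if unavailable), scan the chip and register the
 * MTD device or its partitions.
 */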
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}

#ifdef CONFIG_MTD_PARTITIONS
	if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts,
				       pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r < 0)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}
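
/*
 * Remove: unregister the MTD device or its partitions, release the
 * OneNAND core state and give back the DMA channel, IRQ, GPIO, mapping,
 * memory region and GPMC chip select claimed in probe.
 */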
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
	if (c->parts)
		del_mtd_partitions(&c->mtd);
	else
		del_mtd_device(&c->mtd);
#else
	del_mtd_device(&c->mtd);
#endif

	onenand_release(&c->mtd);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	/* release the chip select requested in probe */
	gpmc_cs_free(c->gpmc_cs);
	kfree(c);

	return 0;
}
static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");
	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");