/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>

#include <asm/mach/flash.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
#include <mach/gpio.h>

#include <plat/dma.h>

#include <plat/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE SZ_128K
#define ONENAND_BUFRAM_SIZE (1024 * 5)

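/*
 * Per-device state: the GPMC chip select and its physical window, the
 * ioremapped OneNAND base (kept in the embedded struct onenand_chip),
 * the optional INT GPIO and system DMA channel, and the completions
 * used to wait for interrupt and DMA events.
 */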
struct omap2_onenand {
        struct platform_device *pdev;
        int gpmc_cs;
        unsigned long phys_base;
        int gpio_irq;
        struct mtd_info mtd;
        struct mtd_partition *parts;
        struct onenand_chip onenand;
        struct completion irq_done;
        struct completion dma_done;
        int dma_channel;
        int freq;
        int (*setup)(void __iomem *base, int *freq_ptr);
        struct regulator *regulator;
};

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
#endif

static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
        struct omap2_onenand *c = data;

        complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
        struct omap2_onenand *c = dev_id;

        complete(&c->irq_done);

        return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
        return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
                             int reg)
{
        writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
        printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
               msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
                      unsigned int intr)
{
        printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
               "intr 0x%04x\n", msg, state, ctrl, intr);
}

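/*
 * Wait for the current OneNAND command to finish.
 *
 * Reset and erase prepare/verify states are first polled briefly with
 * udelay(). For all other states except reads, the INT pin interrupt is
 * awaited (when a GPIO is configured) with a 20 ms timeout, retried up
 * to three times while the controller still reports an ongoing
 * operation. Reads poll the interrupt register instead. Finally, the
 * ECC and controller status registers are checked and translated into
 * -EIO/-EBADMSG or MTD ECC statistics updates.
 */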
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        unsigned int intr = 0;
        unsigned int ctrl, ctrl_mask;
        unsigned long timeout;
        u32 syscfg;

        if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
            state == FL_VERIFYING_ERASE) {
                int i = 21;
                unsigned int intr_flags = ONENAND_INT_MASTER;

                switch (state) {
                case FL_RESETING:
                        intr_flags |= ONENAND_INT_RESET;
                        break;
                case FL_PREPARING_ERASE:
                        intr_flags |= ONENAND_INT_ERASE;
                        break;
                case FL_VERIFYING_ERASE:
                        i = 101;
                        break;
                }

                while (--i) {
                        udelay(1);
                        intr = read_reg(c, ONENAND_REG_INTERRUPT);
                        if (intr & ONENAND_INT_MASTER)
                                break;
                }
                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                if (ctrl & ONENAND_CTRL_ERROR) {
                        wait_err("controller error", state, ctrl, intr);
                        return -EIO;
                }
                if ((intr & intr_flags) == intr_flags)
                        return 0;
                /* Continue in wait for interrupt branch */
        }

        if (state != FL_READING) {
                int result;

                /* Turn interrupts on */
                syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
                if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
                        syscfg |= ONENAND_SYS_CFG1_IOBE;
                        write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
                        if (cpu_is_omap34xx())
                                /* Add a delay to let GPIO settle */
                                syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
                }

                INIT_COMPLETION(c->irq_done);
                if (c->gpio_irq) {
                        result = gpio_get_value(c->gpio_irq);
                        if (result == -1) {
                                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                                intr = read_reg(c, ONENAND_REG_INTERRUPT);
                                wait_err("gpio error", state, ctrl, intr);
                                return -EIO;
                        }
                } else
                        result = 0;
                if (result == 0) {
                        int retry_cnt = 0;
retry:
                        result = wait_for_completion_timeout(&c->irq_done,
                                                    msecs_to_jiffies(20));
                        if (result == 0) {
                                /* Timeout after 20ms */
                                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                                if (ctrl & ONENAND_CTRL_ONGO &&
                                    !this->ongoing) {
                                        /*
                                         * The operation seems to be still going
                                         * so give it some more time.
                                         */
                                        retry_cnt += 1;
                                        if (retry_cnt < 3)
                                                goto retry;
                                        intr = read_reg(c,
                                                        ONENAND_REG_INTERRUPT);
                                        wait_err("timeout", state, ctrl, intr);
                                        return -EIO;
                                }
                                intr = read_reg(c, ONENAND_REG_INTERRUPT);
                                if ((intr & ONENAND_INT_MASTER) == 0)
                                        wait_warn("timeout", state, ctrl, intr);
                        }
                }
        } else {
                int retry_cnt = 0;

                /* Turn interrupts off */
                syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
                syscfg &= ~ONENAND_SYS_CFG1_IOBE;
                write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

                timeout = jiffies + msecs_to_jiffies(20);
                while (1) {
                        if (time_before(jiffies, timeout)) {
                                intr = read_reg(c, ONENAND_REG_INTERRUPT);
                                if (intr & ONENAND_INT_MASTER)
                                        break;
                        } else {
                                /* Timeout after 20ms */
                                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                                if (ctrl & ONENAND_CTRL_ONGO) {
                                        /*
                                         * The operation seems to be still going
                                         * so give it some more time.
                                         */
                                        retry_cnt += 1;
                                        if (retry_cnt < 3) {
                                                timeout = jiffies +
                                                          msecs_to_jiffies(20);
                                                continue;
                                        }
                                }
                                break;
                        }
                }
        }

        intr = read_reg(c, ONENAND_REG_INTERRUPT);
        ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

        if (intr & ONENAND_INT_READ) {
                int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

                if (ecc) {
                        unsigned int addr1, addr8;

                        addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
                        addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
                        if (ecc & ONENAND_ECC_2BIT_ALL) {
                                printk(KERN_ERR "onenand_wait: ECC error = "
                                       "0x%04x, addr1 %#x, addr8 %#x\n",
                                       ecc, addr1, addr8);
                                mtd->ecc_stats.failed++;
                                return -EBADMSG;
                        } else if (ecc & ONENAND_ECC_1BIT_ALL) {
                                printk(KERN_NOTICE "onenand_wait: correctable "
                                       "ECC error = 0x%04x, addr1 %#x, "
                                       "addr8 %#x\n", ecc, addr1, addr8);
                                mtd->ecc_stats.corrected++;
                        }
                }
        } else if (state == FL_READING) {
                wait_err("timeout", state, ctrl, intr);
                return -EIO;
        }

        if (ctrl & ONENAND_CTRL_ERROR) {
                wait_err("controller error", state, ctrl, intr);
                if (ctrl & ONENAND_CTRL_LOCK)
                        printk(KERN_ERR "onenand_wait: "
                               "Device is write protected!!!\n");
                return -EIO;
        }

        ctrl_mask = 0xFE9F;
        if (this->ongoing)
                ctrl_mask &= ~0x8000;

        if (ctrl & ctrl_mask)
                wait_warn("unexpected controller status", state, ctrl, intr);

        return 0;
}

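/*
 * OneNAND has two BufferRAM/SpareRAM banks. When the second bank is the
 * currently selected one, accesses must be offset by one page of data
 * (or one page worth of OOB) to land in the right half of the buffer
 * area.
 */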
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
        struct onenand_chip *this = mtd->priv;

        if (ONENAND_CURRENT_BUFFERRAM(this)) {
                if (area == ONENAND_DATARAM)
                        return this->writesize;
                if (area == ONENAND_SPARERAM)
                        return mtd->oobsize;
        }

        return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
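/*
 * OMAP3 BufferRAM read: use the system DMA controller for word-aligned
 * transfers of at least 384 bytes, and fall back to memcpy() for small,
 * unaligned or atomic-context (panic_write) requests. Vmalloc'ed
 * buffers are translated to their backing page as long as they do not
 * cross a page boundary. DMA completion is busy-waited with a 20 ms
 * timeout.
 */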
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
                                        unsigned char *buffer, int offset,
                                        size_t count)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        dma_addr_t dma_src, dma_dst;
        int bram_offset;
        unsigned long timeout;
        void *buf = (void *)buffer;
        size_t xtra;
        volatile unsigned *done;

        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
        if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
                goto out_copy;

        /* panic_write() may be in an interrupt context */
        if (in_interrupt() || oops_in_progress)
                goto out_copy;

        if (buf >= high_memory) {
                struct page *p1;

                if (((size_t)buf & PAGE_MASK) !=
                    ((size_t)(buf + count - 1) & PAGE_MASK))
                        goto out_copy;
                p1 = vmalloc_to_page(buf);
                if (!p1)
                        goto out_copy;
                buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
        }

        xtra = count & 3;
        if (xtra) {
                count -= xtra;
                memcpy(buf + count, this->base + bram_offset + count, xtra);
        }

        dma_src = c->phys_base + bram_offset;
        dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
        if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
                dev_err(&c->pdev->dev,
                        "Couldn't DMA map a %d byte buffer\n",
                        count);
                goto out_copy;
        }

        omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
                                     count >> 2, 1, 0, 0, 0);
        omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                dma_src, 0, 0);
        omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_dst, 0, 0);

        INIT_COMPLETION(c->dma_done);
        omap_start_dma(c->dma_channel);

        timeout = jiffies + msecs_to_jiffies(20);
        done = &c->dma_done.done;
        while (time_before(jiffies, timeout))
                if (*done)
                        break;

        dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

        if (!*done) {
                dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
                goto out_copy;
        }

        return 0;

out_copy:
        memcpy(buf, this->base + bram_offset, count);
        return 0;
}

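/*
 * OMAP3 BufferRAM write: mirror image of the read path above, with the
 * DMA direction reversed (memory to OneNAND BufferRAM) and the same
 * memcpy() fallback for requests that cannot go through DMA.
 */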
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
                                         const unsigned char *buffer,
                                         int offset, size_t count)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        dma_addr_t dma_src, dma_dst;
        int bram_offset;
        unsigned long timeout;
        void *buf = (void *)buffer;
        volatile unsigned *done;

        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
        if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
                goto out_copy;

        /* panic_write() may be in an interrupt context */
        if (in_interrupt() || oops_in_progress)
                goto out_copy;

        if (buf >= high_memory) {
                struct page *p1;

                if (((size_t)buf & PAGE_MASK) !=
                    ((size_t)(buf + count - 1) & PAGE_MASK))
                        goto out_copy;
                p1 = vmalloc_to_page(buf);
                if (!p1)
                        goto out_copy;
                buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
        }

        dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
        dma_dst = c->phys_base + bram_offset;
        if (dma_mapping_error(&c->pdev->dev, dma_src)) {
                dev_err(&c->pdev->dev,
                        "Couldn't DMA map a %d byte buffer\n",
                        count);
                return -1;
        }

        omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
                                     count >> 2, 1, 0, 0, 0);
        omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                dma_src, 0, 0);
        omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_dst, 0, 0);

        INIT_COMPLETION(c->dma_done);
        omap_start_dma(c->dma_channel);

        timeout = jiffies + msecs_to_jiffies(20);
        done = &c->dma_done.done;
        while (time_before(jiffies, timeout))
                if (*done)
                        break;

        dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

        if (!*done) {
                dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
                goto out_copy;
        }

        return 0;

out_copy:
        memcpy(this->base + bram_offset, buf, count);
        return 0;
}

#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
                                 unsigned char *buffer, int offset,
                                 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
                                  const unsigned char *buffer,
                                  int offset, size_t count);

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
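/*
 * OMAP2 BufferRAM accessors. The DMA path below is currently disabled
 * (note the "if (1 || ...)" guard), so these always fall back to
 * memcpy() until the power-management implications of using DMA here
 * have been revisited.
 */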
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
                                        unsigned char *buffer, int offset,
                                        size_t count)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        dma_addr_t dma_src, dma_dst;
        int bram_offset;

        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
        /* DMA is not used. Revisit PM requirements before enabling it. */
        if (1 || (c->dma_channel < 0) ||
            ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
            (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
                memcpy(buffer, (__force void *)(this->base + bram_offset),
                       count);
                return 0;
        }

        dma_src = c->phys_base + bram_offset;
        dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
                dev_err(&c->pdev->dev,
                        "Couldn't DMA map a %d byte buffer\n",
                        count);
                return -1;
        }

        omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
                                     count / 4, 1, 0, 0, 0);
        omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                dma_src, 0, 0);
        omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_dst, 0, 0);

        INIT_COMPLETION(c->dma_done);
        omap_start_dma(c->dma_channel);
        wait_for_completion(&c->dma_done);

        dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

        return 0;
}

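/*
 * OMAP2 BufferRAM write: same structure as the read above, but the
 * (currently unused) DMA transfer is configured with 16-bit elements.
 */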
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
                                         const unsigned char *buffer,
                                         int offset, size_t count)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        dma_addr_t dma_src, dma_dst;
        int bram_offset;

        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
        /* DMA is not used. Revisit PM requirements before enabling it. */
        if (1 || (c->dma_channel < 0) ||
            ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
            (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
                memcpy((__force void *)(this->base + bram_offset), buffer,
                       count);
                return 0;
        }

        dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
                                 DMA_TO_DEVICE);
        dma_dst = c->phys_base + bram_offset;
        if (dma_mapping_error(&c->pdev->dev, dma_src)) {
                dev_err(&c->pdev->dev,
                        "Couldn't DMA map a %d byte buffer\n",
                        count);
                return -1;
        }

        omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
                                     count / 2, 1, 0, 0, 0);
        omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                dma_src, 0, 0);
        omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_dst, 0, 0);

        INIT_COMPLETION(c->dma_done);
        omap_start_dma(c->dma_channel);
        wait_for_completion(&c->dma_done);

        dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

        return 0;
}

#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
                                 unsigned char *buffer, int offset,
                                 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
                                  const unsigned char *buffer,
                                  int offset, size_t count);

#endif

static struct platform_driver omap2_onenand_driver;

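/*
 * Re-run the board-supplied timing setup for every device bound to this
 * driver; omap2_onenand_rephase() allows the OneNAND timings to be
 * re-tuned, e.g. after an interface frequency change.
 */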
static int __adjust_timing(struct device *dev, void *data)
{
        int ret = 0;
        struct omap2_onenand *c;

        c = dev_get_drvdata(dev);

        BUG_ON(c->setup == NULL);

        /* DMA is not in use so this is all that is needed */
        /* Revisit for OMAP3! */
        ret = c->setup(c->onenand.base, &c->freq);

        return ret;
}

int omap2_onenand_rephase(void)
{
        return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
                                      NULL, __adjust_timing);
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
        struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

        /* With certain content in the buffer RAM, the OMAP boot ROM code
         * can recognize the flash chip incorrectly. Zero it out before
         * soft reset.
         */
        memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

static int omap2_onenand_enable(struct mtd_info *mtd)
{
        int ret;
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

        ret = regulator_enable(c->regulator);
        if (ret != 0)
                dev_err(&c->pdev->dev, "can't enable regulator\n");

        return ret;
}

static int omap2_onenand_disable(struct mtd_info *mtd)
{
        int ret;
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

        ret = regulator_disable(c->regulator);
        if (ret != 0)
                dev_err(&c->pdev->dev, "can't disable regulator\n");

        return ret;
}

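/*
 * Probe: claim a GPMC chip select and its I/O window, run the optional
 * board setup hook, set up the INT GPIO interrupt and a DMA channel when
 * requested, install the OMAP2/OMAP3 specific wait and BufferRAM helpers
 * (only when DMA is available), then scan the chip and register the MTD,
 * partitioned via cmdlinepart or platform data when present.
 */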
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
        struct omap_onenand_platform_data *pdata;
        struct omap2_onenand *c;
        int r;

        pdata = pdev->dev.platform_data;
        if (pdata == NULL) {
                dev_err(&pdev->dev, "platform data missing\n");
                return -ENODEV;
        }

        c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        init_completion(&c->irq_done);
        init_completion(&c->dma_done);
        c->gpmc_cs = pdata->cs;
        c->gpio_irq = pdata->gpio_irq;
        c->dma_channel = pdata->dma_channel;
        if (c->dma_channel < 0) {
                /* if -1, don't use DMA */
                c->gpio_irq = 0;
        }

        r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
        if (r < 0) {
                dev_err(&pdev->dev, "Cannot request GPMC CS\n");
                goto err_kfree;
        }

        if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
                               pdev->dev.driver->name) == NULL) {
                dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
                        "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
                r = -EBUSY;
                goto err_free_cs;
        }
        c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
        if (c->onenand.base == NULL) {
                r = -ENOMEM;
                goto err_release_mem_region;
        }

        if (pdata->onenand_setup != NULL) {
                r = pdata->onenand_setup(c->onenand.base, &c->freq);
                if (r < 0) {
                        dev_err(&pdev->dev, "Onenand platform setup failed: "
                                "%d\n", r);
                        goto err_iounmap;
                }
                c->setup = pdata->onenand_setup;
        }

        if (c->gpio_irq) {
                if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
                        dev_err(&pdev->dev, "Failed to request GPIO%d for "
                                "OneNAND\n", c->gpio_irq);
                        goto err_iounmap;
                }
                gpio_direction_input(c->gpio_irq);

                if ((r = request_irq(gpio_to_irq(c->gpio_irq),
                                     omap2_onenand_interrupt,
                                     IRQF_TRIGGER_RISING,
                                     pdev->dev.driver->name, c)) < 0)
                        goto err_release_gpio;
        }

        if (c->dma_channel >= 0) {
                r = omap_request_dma(0, pdev->dev.driver->name,
                                     omap2_onenand_dma_cb, (void *) c,
                                     &c->dma_channel);
                if (r == 0) {
                        omap_set_dma_write_mode(c->dma_channel,
                                                OMAP_DMA_WRITE_NON_POSTED);
                        omap_set_dma_src_data_pack(c->dma_channel, 1);
                        omap_set_dma_src_burst_mode(c->dma_channel,
                                                    OMAP_DMA_DATA_BURST_8);
                        omap_set_dma_dest_data_pack(c->dma_channel, 1);
                        omap_set_dma_dest_burst_mode(c->dma_channel,
                                                     OMAP_DMA_DATA_BURST_8);
                } else {
                        dev_info(&pdev->dev,
                                 "failed to allocate DMA for OneNAND, "
                                 "using PIO instead\n");
                        c->dma_channel = -1;
                }
        }

        dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
                 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
                 c->onenand.base, c->freq);

        c->pdev = pdev;
        c->mtd.name = dev_name(&pdev->dev);
        c->mtd.priv = &c->onenand;
        c->mtd.owner = THIS_MODULE;

        c->mtd.dev.parent = &pdev->dev;

        if (c->dma_channel >= 0) {
                struct onenand_chip *this = &c->onenand;

                this->wait = omap2_onenand_wait;
                if (cpu_is_omap34xx()) {
                        this->read_bufferram = omap3_onenand_read_bufferram;
                        this->write_bufferram = omap3_onenand_write_bufferram;
                } else {
                        this->read_bufferram = omap2_onenand_read_bufferram;
                        this->write_bufferram = omap2_onenand_write_bufferram;
                }
        }

        if (pdata->regulator_can_sleep) {
                c->regulator = regulator_get(&pdev->dev, "vonenand");
                if (IS_ERR(c->regulator)) {
                        dev_err(&pdev->dev, "Failed to get regulator\n");
                        r = PTR_ERR(c->regulator);
                        goto err_release_dma;
                }
                c->onenand.enable = omap2_onenand_enable;
                c->onenand.disable = omap2_onenand_disable;
        }

        if ((r = onenand_scan(&c->mtd, 1)) < 0)
                goto err_release_regulator;

#ifdef CONFIG_MTD_PARTITIONS
        r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
        if (r > 0)
                r = add_mtd_partitions(&c->mtd, c->parts, r);
        else if (pdata->parts != NULL)
                r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
        else
#endif
                r = add_mtd_device(&c->mtd);
        if (r)
                goto err_release_onenand;

        platform_set_drvdata(pdev, c);

        return 0;

err_release_onenand:
        onenand_release(&c->mtd);
err_release_regulator:
        regulator_put(c->regulator);
err_release_dma:
        if (c->dma_channel != -1)
                omap_free_dma(c->dma_channel);
        if (c->gpio_irq)
                free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
        if (c->gpio_irq)
                gpio_free(c->gpio_irq);
err_iounmap:
        iounmap(c->onenand.base);
err_release_mem_region:
        release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
        gpmc_cs_free(c->gpmc_cs);
err_kfree:
        kfree(c->parts);
        kfree(c);

        return r;
}

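/*
 * Remove: release the MTD and regulator, free the DMA channel, IRQ and
 * GPIO, then tear down the mapping, memory region and GPMC chip select
 * in the reverse order of probe.
 */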
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
        struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

        onenand_release(&c->mtd);
        regulator_put(c->regulator);
        if (c->dma_channel != -1)
                omap_free_dma(c->dma_channel);
        omap2_onenand_shutdown(pdev);
        platform_set_drvdata(pdev, NULL);
        if (c->gpio_irq) {
                free_irq(gpio_to_irq(c->gpio_irq), c);
                gpio_free(c->gpio_irq);
        }
        iounmap(c->onenand.base);
        release_mem_region(c->phys_base, ONENAND_IO_SIZE);
        gpmc_cs_free(c->gpmc_cs);
        kfree(c->parts);
        kfree(c);

        return 0;
}

static struct platform_driver omap2_onenand_driver = {
        .probe = omap2_onenand_probe,
        .remove = __devexit_p(omap2_onenand_remove),
        .shutdown = omap2_onenand_shutdown,
        .driver = {
                .name = DRIVER_NAME,
                .owner = THIS_MODULE,
        },
};

static int __init omap2_onenand_init(void)
{
        printk(KERN_INFO "OneNAND driver initializing\n");
        return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
        platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");