tmio_mmc_pio.c

/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#include "tmio_mmc.h"
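
/*
 * Note on the accessors below: register offsets are given in 16-bit units
 * and scaled by bus_shift for platforms that space the registers out on a
 * wider bus. E.g. with bus_shift = 1 (a 0x400-byte register window, see
 * the size check in tmio_mmc_host_probe() below), register 0x04 lives at
 * byte offset 0x08.
 */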
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	readsw(host->ctl + (addr << host->bus_shift), buf, count);
}

static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
	return readw(host->ctl + (addr << host->bus_shift)) |
	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}

static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
}

static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
		u16 *buf, int count)
{
	writesw(host->ctl + (addr << host->bus_shift), buf, count);
}

static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
	writew(val, host->ctl + (addr << host->bus_shift));
	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}

void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}
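
/*
 * CTL_STATUS bits appear to be write-zero-to-clear: writing the complement
 * of i leaves all other bits at 1 (no effect) and clears only the requested
 * ones. (A note inferred from the code below, not from a datasheet.)
 */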
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;

	return --host->sg_len;
}
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
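
/*
 * Example (illustrative): a status word with CARD_INSERT and RXRDY set is
 * printed by pr_debug_status() as "status: <hex> = CARD_INSERT | RXRDY";
 * the " | " separators come from the i++ check in STATUS_TO_TEXT().
 */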
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;

		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

		host->sdio_irq_enabled = 0;
	}
}
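
/*
 * Worked example for the divider loop below (assuming hclk = 24 MHz for
 * illustration, so f_min = hclk / 512 = 46875 Hz as set up in
 * tmio_mmc_host_probe()): a request for 400 kHz doubles "clock" and halves
 * "clk" three times (46875 -> 375000 Hz), leaving the largest divider
 * setting that does not exceed the requested rate. Bit 0x100 then enables
 * the clock output.
 */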
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock << 1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
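
/*
 * The CLK_AND_WAIT control and the SDIO reset register used below only
 * exist when the register window is larger than 0x100 bytes; the
 * resource_size() checks guard those accesses on the smaller variants.
 */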
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call
	 * preempts us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	mmc_request_done(host->mmc, mrq);
}
/* called with host->lock held, interrupts disabled */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;

	/* FIXME: mmc_request_done() can schedule! */
	mmc_request_done(host->mmc, mrq);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be OK commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
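
/*
 * Example (illustrative): a single-block read (CMD17, R1 response, with
 * data) is issued as c = 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ
 * = 0x1c11.
 */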
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}
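
/*
 * The DMA path (tmio_mmc_dma.c) may substitute host->bounce_sg for a
 * scatterlist entry it cannot transfer directly; after a read completes,
 * the bounced data has to be copied back into the caller's buffer.
 */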
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given
	 * type, which we don't do, as the chip can auto-generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with
		 * the BUSY status bit clear, but on some operations, like
		 * mount or in the beginning of a write / sync / umount, there
		 * is one DATAEND interrupt with the BUSY bit set, in which
		 * case waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
			     unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

out:
	spin_unlock(&host->lock);
}
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	sdio_ireg = 0;
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
			tmio_mmc_ack_mmc_irqs(host,
				TMIO_STAT_CMDRESPEND |
				TMIO_STAT_CMDTIMEOUT);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
		struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
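
/*
 * host->mrq doubles as a claim flag: NULL means idle, a valid pointer means
 * a request is in flight, and an ERR_PTR() value (parked by
 * tmio_mmc_set_ios() below) marks the host as temporarily claimed, making
 * concurrent requests fail with -EAGAIN and telling the delayed reset
 * worker to back off.
 */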
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, therefore it cannot
 * run an MMC card at full speed (20 MHz). The max clock is 24 MHz on SD,
 * but as MMC won't run that fast, it has to be clocked at 12 MHz, which is
 * the next slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(&host->pdev->dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(&host->pdev->dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> UP -> ON */
	if (ios->power_mode == MMC_POWER_UP) {
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* power down SD bus */
		if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
			host->set_pwr(host->pdev, 0);
		tmio_mmc_clk_stop(host);
	} else {
		/* start bus clock */
		tmio_mmc_clk_start(host);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. Delay taken from the WinCE driver */
	udelay(140);

	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u\n",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;
}
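
/*
 * TMIO_STAT_WRPROTECT reads 1 while the card is writable (the write-protect
 * signal is active-low), hence the inversion in tmio_mmc_get_ro() below;
 * the TMIO_MMC_WRPROTECT_DISABLE platform flag forces "not read-only".
 */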
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
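	/*
	 * Transfer geometry (illustrative, assuming 4 KiB pages): 32 segments
	 * of up to PAGE_CACHE_SIZE / 512 = 8 blocks each gives
	 * max_blk_count = 256 and a maximum request size of 128 KiB.
	 */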
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto pm_disable;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto pm_suspend;

	_host->irq = ret;

	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED |
			  IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host);
	if (ret)
		goto pm_suspend;

	spin_lock_init(&_host->lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	/* We have to keep the device powered for its card detection to work */
	pm_runtime_get_noresume(&pdev->dev);

	mmc_add_host(mmc);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	*host = _host;

	return 0;

pm_suspend:
	pm_runtime_suspend(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;

	mmc_remove_host(host->mmc);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);
	free_irq(host->irq, host);
	iounmap(host->ctl);
	mmc_free_host(host->mmc);

	/* Compensate for pm_runtime_get_noresume() in probe() above */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
int tmio_mmc_host_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret = mmc_suspend_host(mmc);

	if (!ret)
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	host->pm_error = pm_runtime_put_sync(dev);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);

int tmio_mmc_host_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (!host->pm_error)
		pm_runtime_get_sync(dev);

	tmio_mmc_reset(host);
	tmio_mmc_request_dma(host, host->pdata);

	return mmc_resume_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_resume);
#endif	/* CONFIG_PM */

MODULE_LICENSE("GPL v2");