tmio_mmc_pio.c

/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4-bit
 * support). (Further 4-bit support from a later datasheet.)
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#include "tmio_mmc.h"
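/*
 * The CTL_IRQ_MASK register masks an interrupt source when its bit is set,
 * so enabling IRQs means clearing bits and disabling IRQs means setting them.
 */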
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;

	return --host->sg_len;
}
#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
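/*
 * Enable or disable forwarding of the card's SDIO interrupt: unmask IOIRQ
 * and switch it on in CTL_TRANSACTION_CTL, or mask all SDIO sources again.
 */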
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}
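/*
 * Program the SD card clock: starting from f_min with the largest divider
 * (low byte 0x80), halve the divider while doubling the achievable clock
 * until the requested rate would be exceeded, then set bit 8 to enable the
 * clock output.
 */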
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
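/*
 * Controllers with a register window larger than 0x100 bytes also provide
 * the CLK_AND_WAIT control and the SDIO reset register used below.
 */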
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}
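/*
 * Delayed-work handler, scheduled when a request is started: if the request
 * has not completed within two seconds, time out the active command or data
 * transfer, reset the controller and complete the request.
 */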
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
				     msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	mmc_request_done(host->mmc, mrq);
}
/* called with host->lock held, interrupts disabled */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;

	/* FIXME: mmc_request_done() can schedule! */
	mmc_request_done(host->mmc, mrq);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD         0x0040
#define RESP_NONE       0x0300
#define RESP_R1         0x0400
#define RESP_R1B        0x0500
#define RESP_R2         0x0600
#define RESP_R3         0x0700
#define DATA_PRESENT    0x0800
#define TRANSFER_READ   0x1000
#define TRANSFER_MULTI  0x2000
#define SECURITY_CMD    0x4000
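/*
 * Build the command register value from the opcode, response type and data
 * direction, then write the argument and command registers to start the
 * transaction. CMD12 with a zero argument is issued through the "stop
 * internal action" register instead of the command register.
 */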
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

	/* FIXME - this seems to be ok commented out but the spec suggests this bit
	 * should be set when issuing app commands.
	 *	if (cmd->flags & MMC_FLAG_ACMD)
	 *		c |= APP_CMD;
	 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}
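/*
 * If the DMA transfer used the bounce buffer, copy the received data back
 * into the caller's original scatterlist entry.
 */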
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * at the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set. In these cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
			     unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		tmio_mmc_finish_request(host);
	}

out:
	spin_unlock(&host->lock);
}
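/*
 * Top-level interrupt handler: dispatches SDIO card interrupts, card
 * insert/remove events, command completion, PIO data and data-end events,
 * looping until no unmasked status bits remain.
 */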
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	sdio_ireg = 0;
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
		    sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);

		pr_warning("tmio_mmc: Spurious irq, disabling! "
			   "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
					      TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		}

		/* CRC and other errors */
		/*	if (ireg & TMIO_STAT_ERR_IRQ)
		 *		handled |= tmio_error_irq(host, irq, stat);
		 */

		/* Command completion */
		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
			tmio_mmc_ack_mmc_irqs(host,
					      TMIO_STAT_CMDRESPEND |
					      TMIO_STAT_CMDTIMEOUT);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = sd_ctrl_read32(host, CTL_STATUS);
		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(&host->pdev->dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(&host->pdev->dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> UP -> ON */
	if (ios->power_mode == MMC_POWER_UP) {
		if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) {
			pm_runtime_get_sync(&host->pdev->dev);
			pdata->power = true;
		}
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* power down SD bus */
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->set_pwr)
				host->set_pwr(host->pdev, 0);
			if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
			    pdata->power) {
				pdata->power = false;
				pm_runtime_put(&host->pdev->dev);
			}
		}
		tmio_mmc_clk_stop(host);
	} else {
		/* start bus clock */
		tmio_mmc_clk_start(host);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. Delay taken from the WinCE driver */
	udelay(140);

	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u\n",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;
}
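/*
 * Report read-only status: the card counts as writable if write-protect
 * detection is disabled in the platform data or the WRPROTECT status bit
 * (set when the card is not write-protected) is set.
 */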
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}
static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
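/*
 * Allocate and register an mmc_host for one TMIO MMC controller instance.
 * The platform glue supplies the register resource and tmio_mmc_data, and
 * receives the new tmio_mmc_host through the *host argument.
 */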
int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	pdata->dev = &pdev->dev;
	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	pdata->power = false;
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto pm_disable;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	spin_lock_init(&_host->lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	/* We have to keep the device powered for its card detection to work */
	if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD))
		pm_runtime_get_noresume(&pdev->dev);

	mmc_add_host(mmc);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;

	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	*host = _host;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;

	/*
	 * We don't have to manipulate pdata->power here: if there is a card in
	 * the slot, the runtime PM is active and our .runtime_resume() will not
	 * be run. If there is no card in the slot and the platform can suspend
	 * the controller, the runtime PM is suspended and pdata->power == false,
	 * so our .runtime_resume() will not try to detect a card in the slot.
	 */
	if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
		pm_runtime_get_sync(&pdev->dev);

	mmc_remove_host(host->mmc);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	iounmap(host->ctl);
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
int tmio_mmc_host_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret = mmc_suspend_host(mmc);

	if (!ret)
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	host->pm_error = pm_runtime_put_sync(dev);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);

int tmio_mmc_host_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	/* The MMC core will perform the complete set up */
	host->pdata->power = false;

	if (!host->pm_error)
		pm_runtime_get_sync(dev);

	tmio_mmc_reset(mmc_priv(mmc));
	tmio_mmc_request_dma(host, host->pdata);

	return mmc_resume_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_resume);

#endif	/* CONFIG_PM */
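/*
 * Runtime PM: nothing needs to be saved on runtime suspend; runtime resume
 * resets the controller and, if power was already applied, restores the ios
 * settings and re-triggers card detection.
 */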
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	tmio_mmc_reset(host);

	if (pdata->power) {
		/* Only entered after a card-insert interrupt */
		tmio_mmc_set_ios(mmc, &mmc->ios);
		mmc_detect_change(mmc, msecs_to_jiffies(100));
	}

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);

MODULE_LICENSE("GPL v2");