/*
 *  linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
 *
 *  Copyright (C) 2007 Google Inc,
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Based on mmci.c
 *
 * Author: San Mehat (san@android.com)
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/memory.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/sizes.h>

#include <mach/mmc.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>

#include "msm_sdcc.h"

#define DRIVER_NAME "msm-sdcc"

static unsigned int msmsdcc_fmin = 144000;
static unsigned int msmsdcc_fmax = 50000000;
static unsigned int msmsdcc_4bit = 1;
static unsigned int msmsdcc_pwrsave = 1;
static unsigned int msmsdcc_piopoll = 1;
static unsigned int msmsdcc_sdioirq;

#define PIO_SPINMAX 30
#define CMD_SPINMAX 20

static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
		      u32 c);

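/*
 * Terminate the current request: clear the command register, hand the
 * transferred byte count back to the core and complete the request.
 * Called with the host lock held; the lock is dropped around
 * mmc_request_done(), which may call back into the driver.
 */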
static void
msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->curr.data);

	host->curr.mrq = NULL;
	host->curr.cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->curr.data_xfered;
	if (mrq->cmd->error == -ETIMEDOUT)
		mdelay(5);

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void
msmsdcc_stop_data(struct msmsdcc_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	host->curr.data = NULL;
	host->curr.got_dataend = host->curr.got_datablkend = 0;
}

uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
{
	switch (host->pdev_id) {
	case 1:
		return MSM_SDC1_PHYS + MMCIFIFO;
	case 2:
		return MSM_SDC2_PHYS + MMCIFIFO;
	case 3:
		return MSM_SDC3_PHYS + MMCIFIFO;
	case 4:
		return MSM_SDC4_PHYS + MMCIFIFO;
	}
	BUG();
	return 0;
}

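/*
 * DataMover completion callback. Runs once the DMA transfer finishes or is
 * flushed: it unmaps the scatterlist and, if the DATAEND/DATABLKEND
 * interrupts have already been seen (or the transfer failed), completes the
 * request or issues the stop command.
 */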
static void
msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
			  unsigned int result,
			  struct msm_dmov_errdata *err)
{
	struct msmsdcc_dma_data *dma_data =
		container_of(cmd, struct msmsdcc_dma_data, hdr);
	struct msmsdcc_host *host = dma_data->host;
	unsigned long flags;
	struct mmc_request *mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("msmsdcc: Invalid DataMover result\n");
		goto out;
	}

	if (result & DMOV_RSLT_DONE) {
		host->curr.data_xfered = host->curr.xfer_size;
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR)
			pr_err("%s: DMA error (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (result & DMOV_RSLT_FLUSH)
			pr_err("%s: DMA channel flushed (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (err)
			pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
			       err->flush[0], err->flush[1], err->flush[2],
			       err->flush[3], err->flush[4], err->flush[5]);
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}
	host->dma.busy = 0;
	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
		     host->dma.dir);

	if (host->curr.user_pages) {
		struct scatterlist *sg = host->dma.sg;
		int i;

		for (i = 0; i < host->dma.num_ents; i++)
			flush_dcache_page(sg_page(sg++));
	}

	host->dma.sg = NULL;

	if ((host->curr.got_dataend && host->curr.got_datablkend)
	    || mrq->data->error) {
		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */
		msmsdcc_stop_data(host);

		if (!mrq->data->error)
			host->curr.data_xfered = host->curr.xfer_size;
		if (!mrq->data->stop || mrq->cmd->error) {
			writel(0, host->base + MMCICOMMAND);
			host->curr.mrq = NULL;
			host->curr.cmd = NULL;
			mrq->data->bytes_xfered = host->curr.data_xfered;

			spin_unlock_irqrestore(&host->lock, flags);
			mmc_request_done(host->mmc, mrq);
			return;
		} else
			msmsdcc_start_command(host, mrq->data->stop, 0);
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	return;
}

static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
	if (host->dma.channel == -1)
		return -ENOENT;

	if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
		return -EINVAL;
	if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
		return -EINVAL;
	return 0;
}

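/*
 * Build the DataMover box-mode command list for this request: one box
 * command per scatterlist entry, moving FIFO-sized rows between memory and
 * the controller FIFO under CRCI flow control.
 */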
static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
	struct msmsdcc_nc_dmadata *nc;
	dmov_box *box;
	uint32_t rows;
	uint32_t crci;
	unsigned int n;
	int i, rc;
	struct scatterlist *sg = data->sg;

	rc = validate_dma(host, data);
	if (rc)
		return rc;

	host->dma.sg = data->sg;
	host->dma.num_ents = data->sg_len;

	nc = host->dma.nc;

	switch (host->pdev_id) {
	case 1:
		crci = MSMSDCC_CRCI_SDC1;
		break;
	case 2:
		crci = MSMSDCC_CRCI_SDC2;
		break;
	case 3:
		crci = MSMSDCC_CRCI_SDC3;
		break;
	case 4:
		crci = MSMSDCC_CRCI_SDC4;
		break;
	default:
		host->dma.sg = NULL;
		host->dma.num_ents = 0;
		return -ENOENT;
	}

	if (data->flags & MMC_DATA_READ)
		host->dma.dir = DMA_FROM_DEVICE;
	else
		host->dma.dir = DMA_TO_DEVICE;

	host->curr.user_pages = 0;

	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
		       host->dma.num_ents, host->dma.dir);

	if (n != host->dma.num_ents) {
		pr_err("%s: Unable to map in all sg elements\n",
		       mmc_hostname(host->mmc));
		host->dma.sg = NULL;
		host->dma.num_ents = 0;
		return -ENOMEM;
	}

	box = &nc->cmd[0];
	for (i = 0; i < host->dma.num_ents; i++) {
		box->cmd = CMD_MODE_BOX;

		if (i == (host->dma.num_ents - 1))
			box->cmd |= CMD_LC;
		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
			(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
			(sg_dma_len(sg) / MCI_FIFOSIZE);

		if (data->flags & MMC_DATA_READ) {
			box->src_row_addr = msmsdcc_fifo_addr(host);
			box->dst_row_addr = sg_dma_address(sg);

			box->src_dst_len = (MCI_FIFOSIZE << 16) |
					   (MCI_FIFOSIZE);
			box->row_offset = MCI_FIFOSIZE;

			box->num_rows = rows * ((1 << 16) + 1);
			box->cmd |= CMD_SRC_CRCI(crci);
		} else {
			box->src_row_addr = sg_dma_address(sg);
			box->dst_row_addr = msmsdcc_fifo_addr(host);

			box->src_dst_len = (MCI_FIFOSIZE << 16) |
					   (MCI_FIFOSIZE);
			box->row_offset = (MCI_FIFOSIZE << 16);

			box->num_rows = rows * ((1 << 16) + 1);
			box->cmd |= CMD_DST_CRCI(crci);
		}
		box++;
		sg++;
	}

	/* location of command block must be 64 bit aligned */
	BUG_ON(host->dma.cmd_busaddr & 0x07);

	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
	host->dma.hdr.execute_func = NULL;

	return 0;
}

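/*
 * Program the data path for a transfer: set the data timeout and length,
 * then either hand the transfer to the DataMover or arm the PIO interrupt
 * mask, and finally enable the data path state machine.
 */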
static void
msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout;
	unsigned long long clks;
	void __iomem *base = host->base;
	unsigned int pio_irqmask = 0;

	host->curr.data = data;
	host->curr.xfer_size = data->blksz * data->blocks;
	host->curr.xfer_remain = host->curr.xfer_size;
	host->curr.data_xfered = 0;
	host->curr.got_dataend = 0;
	host->curr.got_datablkend = 0;

	memset(&host->pio, 0, sizeof(host->pio));

	clks = (unsigned long long)data->timeout_ns * host->clk_rate;
	do_div(clks, NSEC_PER_SEC);
	timeout = data->timeout_clks + (unsigned int)clks;
	writel(timeout, base + MMCIDATATIMER);

	writel(host->curr.xfer_size, base + MMCIDATALENGTH);

	datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);

	if (!msmsdcc_config_dma(host, data))
		datactrl |= MCI_DPSM_DMAENABLE;
	else {
		host->pio.sg = data->sg;
		host->pio.sg_len = data->sg_len;
		host->pio.sg_off = 0;

		if (data->flags & MMC_DATA_READ) {
			pio_irqmask = MCI_RXFIFOHALFFULLMASK;
			if (host->curr.xfer_remain < MCI_FIFOSIZE)
				pio_irqmask |= MCI_RXDATAAVLBLMASK;
		} else
			pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	writel(pio_irqmask, base + MMCIMASK1);
	writel(datactrl, base + MMCIDATACTRL);

	if (datactrl & MCI_DPSM_DMAENABLE) {
		host->dma.busy = 1;
		msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
	}
}

static int
snoop_cccr_abort(struct mmc_command *cmd)
{
	if ((cmd->opcode == 52) &&
	    (cmd->arg & 0x80000000) &&
	    (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
		return 1;
	return 0;
}

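/*
 * Load the argument and command registers and kick off the command path
 * state machine, flagging long responses, data commands and aborts as
 * required.
 */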
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(2 + ((5 * 1000000) / host->clk_rate));
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}

	if (cmd->opcode == 17 || cmd->opcode == 18 ||
	    cmd->opcode == 24 || cmd->opcode == 25 ||
	    cmd->opcode == 53)
		c |= MCI_CSPM_DATCMD;

	if (cmd == cmd->mrq->stop)
		c |= MCI_CSPM_MCIABORT;

	if (snoop_cccr_abort(cmd))
		c |= MCI_CSPM_MCIABORT;

	host->curr.cmd = cmd;

	host->stats.cmds++;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
		 unsigned int status)
{
	if (status & MCI_DATACRCFAIL) {
		pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
		pr_err("%s: opcode 0x%.8x\n", __func__,
		       data->mrq->cmd->opcode);
		pr_err("%s: blksz %d, blocks %d\n", __func__,
		       data->blksz, data->blocks);
		data->error = -EILSEQ;
	} else if (status & MCI_DATATIMEOUT) {
		pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
		data->error = -ETIMEDOUT;
	} else if (status & MCI_RXOVERRUN) {
		pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
		data->error = -EIO;
	} else if (status & MCI_TXUNDERRUN) {
		pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
		data->error = -EIO;
	} else {
		pr_err("%s: Unknown error (0x%.8x)\n",
		       mmc_hostname(host->mmc), status);
		data->error = -EIO;
	}
}

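/* PIO helpers: move data between the caller's buffer and the controller FIFO. */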
static int
msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	uint32_t *ptr = (uint32_t *) buffer;
	int count = 0;

	while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
		*ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
		ptr++;
		count += sizeof(uint32_t);

		remain -= sizeof(uint32_t);
		if (remain == 0)
			break;
	}
	return count;
}

static int
msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
		  unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
						    MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);
		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

static int
msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
{
	while (maxspin) {
		if ((readl(host->base + MMCISTATUS) & mask))
			return 0;
		udelay(1);
		--maxspin;
	}
	return -ETIMEDOUT;
}

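/*
 * PIO interrupt handler: map the current scatterlist page and move data
 * to/from the FIFO, optionally spinning briefly (PIO_SPINMAX) when
 * msmsdcc_piopoll is set to catch the next FIFO event without another
 * interrupt.
 */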
static irqreturn_t
msmsdcc_pio_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;
	void __iomem *base = host->base;
	uint32_t status;

	status = readl(base + MMCISTATUS);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
			if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
				break;

			if (msmsdcc_spin_on_status(host,
						   (MCI_TXFIFOHALFEMPTY |
						    MCI_RXDATAAVLBL),
						   PIO_SPINMAX)) {
				break;
			}
		}

		/* Map the current scatter buffer */
		local_irq_save(flags);
		buffer = kmap_atomic(sg_page(host->pio.sg),
				     KM_BIO_SRC_IRQ) + host->pio.sg->offset;
		buffer += host->pio.sg_off;
		remain = host->pio.sg->length - host->pio.sg_off;
		len = 0;
		if (status & MCI_RXACTIVE)
			len = msmsdcc_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = msmsdcc_pio_write(host, buffer, remain, status);

		/* Unmap the buffer */
		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		local_irq_restore(flags);

		host->pio.sg_off += len;
		host->curr.xfer_remain -= len;
		host->curr.data_xfered += len;
		remain -= len;

		if (remain == 0) {
			/* This sg page is full - do some housekeeping */
			if (status & MCI_RXACTIVE && host->curr.user_pages)
				flush_dcache_page(sg_page(host->pio.sg));

			if (!--host->pio.sg_len) {
				memset(&host->pio, 0, sizeof(host->pio));
				break;
			}

			/* Advance to next sg */
			host->pio.sg++;
			host->pio.sg_off = 0;
		}

		status = readl(base + MMCISTATUS);
	} while (1);

	if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	if (!host->curr.xfer_remain)
		writel(0, base + MMCIMASK1);

	return IRQ_HANDLED;
}

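/*
 * Command-status handling: latch the response registers, record timeout or
 * CRC errors, and either finish the request or start the write data phase.
 */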
static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
{
	struct mmc_command *cmd = host->curr.cmd;
	void __iomem *base = host->base;

	host->curr.cmd = NULL;
	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	del_timer(&host->command_timer);
	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL &&
		   cmd->flags & MMC_RSP_CRC) {
		pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->curr.data && host->dma.sg)
			msm_dmov_stop_cmd(host->dma.channel,
					  &host->dma.hdr, 0);
		else if (host->curr.data) { /* Non DMA */
			msmsdcc_stop_data(host);
			msmsdcc_request_end(host, cmd->mrq);
		} else /* host->data == NULL */
			msmsdcc_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ))
		msmsdcc_start_data(host, cmd->data);
}

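/*
 * Data-status handling: report data errors, track DATAEND/DATABLKEND, and
 * complete the data phase once both have been seen and DMA is idle.
 */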
static void
msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
			void __iomem *base)
{
	struct mmc_data *data = host->curr.data;

	if (!data)
		return;

	/* Check for data errors */
	if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
		      MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
		msmsdcc_data_err(host, data, status);
		host->curr.data_xfered = 0;
		if (host->dma.sg)
			msm_dmov_stop_cmd(host->dma.channel,
					  &host->dma.hdr, 0);
		else {
			msmsdcc_stop_data(host);
			if (!data->stop)
				msmsdcc_request_end(host, data->mrq);
			else
				msmsdcc_start_command(host, data->stop, 0);
		}
	}

	/* Check for data done */
	if (!host->curr.got_dataend && (status & MCI_DATAEND))
		host->curr.got_dataend = 1;

	if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
		host->curr.got_datablkend = 1;

	/*
	 * If DMA is still in progress, we complete via the completion handler
	 */
	if (host->curr.got_dataend && host->curr.got_datablkend &&
	    !host->dma.busy) {
		/*
		 * There appears to be an issue in the controller where
		 * if you request a small block transfer (< fifo size),
		 * you may get your DATAEND/DATABLKEND irq without the
		 * PIO data irq.
		 *
		 * Check to see if there is still data to be read,
		 * and simulate a PIO irq.
		 */
		if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
			msmsdcc_pio_irq(1, host);

		msmsdcc_stop_data(host);
		if (!data->error)
			host->curr.data_xfered = host->curr.xfer_size;

		if (!data->stop)
			msmsdcc_request_end(host, data->mrq);
		else
			msmsdcc_start_command(host, data->stop, 0);
	}
}

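/*
 * Main controller interrupt handler. Loops while masked status bits remain
 * set, dispatching to the data and command handlers, and defers any SDIO
 * card interrupt until the host lock has been dropped.
 */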
static irqreturn_t
msmsdcc_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;
	int ret = 0;
	int cardint = 0;

	spin_lock(&host->lock);

	do {
		status = readl(base + MMCISTATUS);
		status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
		writel(status, base + MMCICLEAR);

		msmsdcc_handle_irq_data(host, status, base);

		if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
			      MCI_CMDTIMEOUT) && host->curr.cmd) {
			msmsdcc_do_cmdirq(host, status);
		}

		if (status & MCI_SDIOINTOPER) {
			cardint = 1;
			status &= ~MCI_SDIOINTOPER;
		}
		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	/*
	 * We have to delay handling the card interrupt as it calls
	 * back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return IRQ_RETVAL(ret);
}

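/*
 * mmc_host_ops request entry point. Rejects requests when the card has been
 * ejected, starts the data phase first for reads, then issues the command,
 * optionally polling briefly for the command response (host->cmdpoll).
 */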
static void
msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->curr.mrq != NULL);
	WARN_ON(host->pwr == 0);

	spin_lock_irqsave(&host->lock, flags);

	host->stats.reqs++;

	if (host->eject) {
		if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
			mrq->cmd->error = 0;
			mrq->data->bytes_xfered = mrq->data->blksz *
						  mrq->data->blocks;
		} else
			mrq->cmd->error = -ENOMEDIUM;

		spin_unlock_irqrestore(&host->lock, flags);
		mmc_request_done(mmc, mrq);
		return;
	}

	host->curr.mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		msmsdcc_start_data(host, mrq->data);

	msmsdcc_start_command(host, mrq->cmd, 0);

	if (host->cmdpoll && !msmsdcc_spin_on_status(host,
				MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
				CMD_SPINMAX)) {
		uint32_t status = readl(host->base + MMCISTATUS);

		msmsdcc_do_cmdirq(host, status);
		writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
		       host->base + MMCICLEAR);
		host->stats.cmdpoll_hits++;
	} else {
		host->stats.cmdpoll_misses++;
		mod_timer(&host->command_timer, jiffies + HZ);
	}
	spin_unlock_irqrestore(&host->lock, flags);
}

static inline int
msmsdcc_enable_clocks(struct msmsdcc_host *host, int enable)
{
	int rc;

	if (enable) {
		rc = clk_enable(host->pclk);
		if (rc)
			return rc;
		rc = clk_enable(host->clk);
		if (rc) {
			clk_disable(host->pclk);
			return rc;
		}
		host->clks_on = 1;
		udelay(10);
	} else {
		clk_disable(host->clk);
		clk_disable(host->pclk);
		host->clks_on = 0;
	}
	return 0;
}

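/*
 * mmc_host_ops set_ios entry point: program the clock rate, bus width,
 * power state and open-drain mode requested by the core, gating the host
 * clocks when the bus clock is switched off.
 */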
static void
msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	u32 clk = 0, pwr = 0;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (ios->clock) {
		if (!host->clks_on)
			msmsdcc_enable_clocks(host, 1);

		if (ios->clock != host->clk_rate) {
			rc = clk_set_rate(host->clk, ios->clock);
			if (rc < 0)
				pr_err("%s: Error setting clock rate (%d)\n",
				       mmc_hostname(host->mmc), rc);
			else
				host->clk_rate = ios->clock;
		}
		clk |= MCI_CLK_ENABLE;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		clk |= (2 << 10); /* Set WIDEBUS */

	if (ios->clock > 400000 && msmsdcc_pwrsave)
		clk |= (1 << 9); /* PWRSAVE */

	clk |= (1 << 12); /* FLOW_ENA */
	clk |= (1 << 15); /* feedback clock */

	if (host->plat->translate_vdd)
		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		break;
	case MMC_POWER_UP:
		pwr |= MCI_PWR_UP;
		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		pwr |= MCI_OD;

	writel(clk, host->base + MMCICLOCK);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	if (!(clk & MCI_CLK_ENABLE) && host->clks_on)
		msmsdcc_enable_clocks(host, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&host->lock, flags);
	if (msmsdcc_sdioirq == 1) {
		status = readl(host->base + MMCIMASK0);
		if (enable)
			status |= MCI_SDIOINTOPERMASK;
		else
			status &= ~MCI_SDIOINTOPERMASK;
		host->saved_irq0mask = status;
		writel(status, host->base + MMCIMASK0);
	}
	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops msmsdcc_ops = {
	.request = msmsdcc_request,
	.set_ios = msmsdcc_set_ios,
	.enable_sdio_irq = msmsdcc_enable_sdio_irq,
};

static void
msmsdcc_check_status(unsigned long data)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
	unsigned int status;

	if (!host->plat->status) {
		mmc_detect_change(host->mmc, 0);
		goto out;
	}

	status = host->plat->status(mmc_dev(host->mmc));
	host->eject = !status;
	if (status ^ host->oldstat) {
		pr_info("%s: Slot status change detected (%d -> %d)\n",
			mmc_hostname(host->mmc), host->oldstat, status);
		if (status)
			mmc_detect_change(host->mmc, (5 * HZ) / 2);
		else
			mmc_detect_change(host->mmc, 0);
	}

	host->oldstat = status;

out:
	if (host->timer.function)
		mod_timer(&host->timer, jiffies + HZ);
}

static irqreturn_t
msmsdcc_platform_status_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;

	printk(KERN_DEBUG "%s: %d\n", __func__, irq);
	msmsdcc_check_status((unsigned long) host);
	return IRQ_HANDLED;
}

static void
msmsdcc_status_notify_cb(int card_present, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;

	printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
	       card_present);
	msmsdcc_check_status((unsigned long) host);
}

/*
 * Called when a command expires. Dump some debugging information
 * and then error out the transaction.
 */
static void
msmsdcc_command_expired(unsigned long _data)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;

	if (!mrq) {
		pr_info("%s: Command expiry misfire\n",
			mmc_hostname(host->mmc));
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	pr_err("%s: Command timeout (%p %p %p %p)\n",
	       mmc_hostname(host->mmc), mrq, mrq->cmd,
	       mrq->data, host->dma.sg);

	mrq->cmd->error = -ETIMEDOUT;
	msmsdcc_stop_data(host);

	writel(0, host->base + MMCICOMMAND);
	host->curr.mrq = NULL;
	host->curr.cmd = NULL;

	spin_unlock_irqrestore(&host->lock, flags);
	mmc_request_done(host->mmc, mrq);
}

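/*
 * Allocate the non-cached DataMover command buffer and record the DMA
 * channel from the platform resources; the channel is left at -1
 * (PIO only) if no DMA resource was provided.
 */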
static int
msmsdcc_init_dma(struct msmsdcc_host *host)
{
	memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
	host->dma.host = host;
	host->dma.channel = -1;

	if (!host->dmares)
		return -ENODEV;

	host->dma.nc = dma_alloc_coherent(NULL,
					  sizeof(struct msmsdcc_nc_dmadata),
					  &host->dma.nc_busaddr,
					  GFP_KERNEL);
	if (host->dma.nc == NULL) {
		pr_err("Unable to allocate DMA buffer\n");
		return -ENOMEM;
	}
	memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
	host->dma.cmd_busaddr = host->dma.nc_busaddr;
	host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
				   offsetof(struct msmsdcc_nc_dmadata, cmdptr);
	host->dma.channel = host->dmares->start;

	return 0;
}

#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
static void
do_resume_work(struct work_struct *work)
{
	struct msmsdcc_host *host =
		container_of(work, struct msmsdcc_host, resume_task);
	struct mmc_host *mmc = host->mmc;

	if (mmc) {
		mmc_resume_host(mmc);
		if (host->stat_irq)
			enable_irq(host->stat_irq);
	}
}
#endif

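/*
 * Platform device probe: map the controller, set up DMA and clocks,
 * populate the mmc_host, hook up card detection (status IRQ, notifier
 * callback or polling timer) and register the command and PIO interrupt
 * handlers.
 */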
static int
msmsdcc_probe(struct platform_device *pdev)
{
	struct mmc_platform_data *plat = pdev->dev.platform_data;
	struct msmsdcc_host *host;
	struct mmc_host *mmc;
	struct resource *cmd_irqres = NULL;
	struct resource *pio_irqres = NULL;
	struct resource *stat_irqres = NULL;
	struct resource *memres = NULL;
	struct resource *dmares = NULL;
	int ret;

	/* must have platform data */
	if (!plat) {
		pr_err("%s: Platform data not available\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	if (pdev->id < 1 || pdev->id > 4)
		return -EINVAL;

	if (pdev->resource == NULL || pdev->num_resources < 2) {
		pr_err("%s: Invalid resource\n", __func__);
		return -ENXIO;
	}

	memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						  "cmd_irq");
	pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						  "pio_irq");
	stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						   "status_irq");

	if (!cmd_irqres || !pio_irqres || !memres) {
		pr_err("%s: Invalid resource\n", __func__);
		return -ENXIO;
	}

	/*
	 * Setup our host structure
	 */
	mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	host = mmc_priv(mmc);
	host->pdev_id = pdev->id;
	host->plat = plat;
	host->mmc = mmc;

	host->cmdpoll = 1;

	host->base = ioremap(memres->start, PAGE_SIZE);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	host->cmd_irqres = cmd_irqres;
	host->pio_irqres = pio_irqres;
	host->memres = memres;
	host->dmares = dmares;
	spin_lock_init(&host->lock);

	/*
	 * Setup DMA
	 */
	msmsdcc_init_dma(host);

	/* Get our clocks */
	host->pclk = clk_get(&pdev->dev, "sdc_pclk");
	if (IS_ERR(host->pclk)) {
		ret = PTR_ERR(host->pclk);
		goto host_free;
	}

	host->clk = clk_get(&pdev->dev, "sdc_clk");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto pclk_put;
	}

	/* Enable clocks */
	ret = msmsdcc_enable_clocks(host, 1);
	if (ret)
		goto clk_put;

	ret = clk_set_rate(host->clk, msmsdcc_fmin);
	if (ret) {
		pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
		goto clk_disable;
	}

	host->pclk_rate = clk_get_rate(host->pclk);
	host->clk_rate = clk_get_rate(host->clk);

	/*
	 * Setup MMC host structure
	 */
	mmc->ops = &msmsdcc_ops;
	mmc->f_min = msmsdcc_fmin;
	mmc->f_max = msmsdcc_fmax;
	mmc->ocr_avail = plat->ocr_mask;

	if (msmsdcc_4bit)
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	if (msmsdcc_sdioirq)
		mmc->caps |= MMC_CAP_SDIO_IRQ;
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;

	mmc->max_phys_segs = NR_SG;
	mmc->max_hw_segs = NR_SG;
	mmc->max_blk_size = 4096;	/* MCI_DATA_CTL BLOCKSIZE up to 4096 */
	mmc->max_blk_count = 65536;

	mmc->max_req_size = 33554432;	/* MCI_DATA_LENGTH is 25 bits */
	mmc->max_seg_size = mmc->max_req_size;

	writel(0, host->base + MMCIMASK0);
	writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
	host->saved_irq0mask = MCI_IRQENABLE;

	/*
	 * Setup card detect change
	 */
	memset(&host->timer, 0, sizeof(host->timer));

	if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
		unsigned long irqflags = IRQF_SHARED |
			(stat_irqres->flags & IRQF_TRIGGER_MASK);

		host->stat_irq = stat_irqres->start;
		ret = request_irq(host->stat_irq,
				  msmsdcc_platform_status_irq,
				  irqflags,
				  DRIVER_NAME " (slot)",
				  host);
		if (ret) {
			pr_err("%s: Unable to get slot IRQ %d (%d)\n",
			       mmc_hostname(mmc), host->stat_irq, ret);
			goto clk_disable;
		}
	} else if (plat->register_status_notify) {
		plat->register_status_notify(msmsdcc_status_notify_cb, host);
	} else if (!plat->status)
		pr_err("%s: No card detect facilities available\n",
		       mmc_hostname(mmc));
	else {
		init_timer(&host->timer);
		host->timer.data = (unsigned long)host;
		host->timer.function = msmsdcc_check_status;
		host->timer.expires = jiffies + HZ;
		add_timer(&host->timer);
	}

	if (plat->status) {
		host->oldstat = host->plat->status(mmc_dev(host->mmc));
		host->eject = !host->oldstat;
	}

	/*
	 * Setup a command timer. We currently need this due to
	 * some 'strange' timeout / error handling situations.
	 */
	init_timer(&host->command_timer);
	host->command_timer.data = (unsigned long) host;
	host->command_timer.function = msmsdcc_command_expired;

	ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
			  DRIVER_NAME " (cmd)", host);
	if (ret)
		goto stat_irq_free;

	ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
			  DRIVER_NAME " (pio)", host);
	if (ret)
		goto cmd_irq_free;

	mmc_set_drvdata(pdev, mmc);
	mmc_add_host(mmc);

	pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
		mmc_hostname(mmc), (unsigned long long)memres->start,
		(unsigned int) cmd_irqres->start,
		(unsigned int) host->stat_irq, host->dma.channel);
	pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
		(mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
	pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
		mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
	pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
	pr_info("%s: Power save feature enable = %d\n",
		mmc_hostname(mmc), msmsdcc_pwrsave);

	if (host->dma.channel != -1) {
		pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
			mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
		pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
			mmc_hostname(mmc), host->dma.cmd_busaddr,
			host->dma.cmdptr_busaddr);
	} else
		pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
	if (host->timer.function)
		pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));

	return 0;

cmd_irq_free:
	free_irq(cmd_irqres->start, host);
stat_irq_free:
	if (host->stat_irq)
		free_irq(host->stat_irq, host);
clk_disable:
	msmsdcc_enable_clocks(host, 0);
clk_put:
	clk_put(host->clk);
pclk_put:
	clk_put(host->pclk);
host_free:
	mmc_free_host(mmc);
out:
	return ret;
}

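/*
 * Platform suspend/resume: mask controller interrupts and gate the clocks
 * on suspend (SDIO cards are left alone), then restore the saved interrupt
 * mask and clocks on resume.
 */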
static int
msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = mmc_get_drvdata(dev);
	int rc = 0;

	if (mmc) {
		struct msmsdcc_host *host = mmc_priv(mmc);

		if (host->stat_irq)
			disable_irq(host->stat_irq);

		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
			rc = mmc_suspend_host(mmc, state);
		if (!rc) {
			writel(0, host->base + MMCIMASK0);

			if (host->clks_on)
				msmsdcc_enable_clocks(host, 0);
		}
	}
	return rc;
}

static int
msmsdcc_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = mmc_get_drvdata(dev);
	unsigned long flags;

	if (mmc) {
		struct msmsdcc_host *host = mmc_priv(mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (!host->clks_on)
			msmsdcc_enable_clocks(host, 1);

		writel(host->saved_irq0mask, host->base + MMCIMASK0);
		spin_unlock_irqrestore(&host->lock, flags);

		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
			mmc_resume_host(mmc);
		if (host->stat_irq)
			enable_irq(host->stat_irq);
	}
	return 0;
}

static struct platform_driver msmsdcc_driver = {
	.probe		= msmsdcc_probe,
	.suspend	= msmsdcc_suspend,
	.resume		= msmsdcc_resume,
	.driver		= {
		.name	= "msm_sdcc",
	},
};

static int __init msmsdcc_init(void)
{
	return platform_driver_register(&msmsdcc_driver);
}

static void __exit msmsdcc_exit(void)
{
	platform_driver_unregister(&msmsdcc_driver);
}

module_init(msmsdcc_init);
module_exit(msmsdcc_exit);

MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
MODULE_LICENSE("GPL");