/*
 *  linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
 *
 *  Copyright (C) 2007 Google Inc,
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Based on mmci.c
 *
 * Author: San Mehat (san@android.com)
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/memory.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/sizes.h>

#include <asm/mach/mmc.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>
#include <mach/htc_pwrsink.h>

#include "msm_sdcc.h"

#define DRIVER_NAME "msm-sdcc"

static unsigned int msmsdcc_fmin = 144000;
static unsigned int msmsdcc_fmax = 50000000;
static unsigned int msmsdcc_4bit = 1;
static unsigned int msmsdcc_pwrsave = 1;
static unsigned int msmsdcc_piopoll = 1;
static unsigned int msmsdcc_sdioirq;

#define PIO_SPINMAX 30
#define CMD_SPINMAX 20

static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
		      u32 c);

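/* Tear down the current request and hand it back to the MMC core. */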
static void
msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->curr.data);

	host->curr.mrq = NULL;
	host->curr.cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->curr.data_xfered;
	if (mrq->cmd->error == -ETIMEDOUT)
		mdelay(5);

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void
msmsdcc_stop_data(struct msmsdcc_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	host->curr.data = NULL;
	host->curr.got_dataend = host->curr.got_datablkend = 0;
}

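/* Physical address of the controller's data FIFO, used by the DataMover. */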
uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
{
	if (host->pdev_id == 1)
		return MSM_SDC1_PHYS + MMCIFIFO;
	else if (host->pdev_id == 2)
		return MSM_SDC2_PHYS + MMCIFIFO;
	else if (host->pdev_id == 3)
		return MSM_SDC3_PHYS + MMCIFIFO;
	else if (host->pdev_id == 4)
		return MSM_SDC4_PHYS + MMCIFIFO;
	else
		BUG();
	return 0;
}

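/*
 * DataMover completion callback: runs when the DMA transfer finishes,
 * is flushed, or errors out. Unmaps the scatterlist and, if DATAEND /
 * DATABLKEND have already been seen, completes the request from here.
 */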
static void
msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
			  unsigned int result,
			  struct msm_dmov_errdata *err)
{
	struct msmsdcc_dma_data *dma_data =
		container_of(cmd, struct msmsdcc_dma_data, hdr);
	struct msmsdcc_host *host = dma_data->host;
	unsigned long flags;
	struct mmc_request *mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("msmsdcc: Invalid DataMover result\n");
		goto out;
	}

	if (result & DMOV_RSLT_DONE) {
		host->curr.data_xfered = host->curr.xfer_size;
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR)
			pr_err("%s: DMA error (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (result & DMOV_RSLT_FLUSH)
			pr_err("%s: DMA channel flushed (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (err)
			pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
			       err->flush[0], err->flush[1], err->flush[2],
			       err->flush[3], err->flush[4], err->flush[5]);
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}

	host->dma.busy = 0;
	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
		     host->dma.dir);

	if (host->curr.user_pages) {
		struct scatterlist *sg = host->dma.sg;
		int i;

		for (i = 0; i < host->dma.num_ents; i++, sg++)
			flush_dcache_page(sg_page(sg));
	}

	host->dma.sg = NULL;

	if ((host->curr.got_dataend && host->curr.got_datablkend)
	    || mrq->data->error) {
		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */
		msmsdcc_stop_data(host);

		if (!mrq->data->error)
			host->curr.data_xfered = host->curr.xfer_size;
		if (!mrq->data->stop || mrq->cmd->error) {
			writel(0, host->base + MMCICOMMAND);
			host->curr.mrq = NULL;
			host->curr.cmd = NULL;
			mrq->data->bytes_xfered = host->curr.data_xfered;

			spin_unlock_irqrestore(&host->lock, flags);
			mmc_request_done(host->mmc, mrq);
			return;
		} else
			msmsdcc_start_command(host, mrq->data->stop, 0);
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	return;
}

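/*
 * A transfer may use DMA only if a DataMover channel is assigned and the
 * total length is at least one FIFO's worth and a multiple of the FIFO size.
 */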
static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
	if (host->dma.channel == -1)
		return -ENOENT;

	if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
		return -EINVAL;
	if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
		return -EINVAL;
	return 0;
}

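/*
 * Map the scatterlist and build one DataMover box-mode command per sg
 * entry, moving FIFO-sized rows between memory and the SDCC FIFO.
 */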
static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
	struct msmsdcc_nc_dmadata *nc;
	dmov_box *box;
	uint32_t rows;
	uint32_t crci;
	unsigned int n;
	int i, rc;
	struct scatterlist *sg = data->sg;

	rc = validate_dma(host, data);
	if (rc)
		return rc;

	host->dma.sg = data->sg;
	host->dma.num_ents = data->sg_len;

	nc = host->dma.nc;

	if (host->pdev_id == 1)
		crci = MSMSDCC_CRCI_SDC1;
	else if (host->pdev_id == 2)
		crci = MSMSDCC_CRCI_SDC2;
	else if (host->pdev_id == 3)
		crci = MSMSDCC_CRCI_SDC3;
	else if (host->pdev_id == 4)
		crci = MSMSDCC_CRCI_SDC4;
	else {
		host->dma.sg = NULL;
		host->dma.num_ents = 0;
		return -ENOENT;
	}

	if (data->flags & MMC_DATA_READ)
		host->dma.dir = DMA_FROM_DEVICE;
	else
		host->dma.dir = DMA_TO_DEVICE;

	host->curr.user_pages = 0;

	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
		       host->dma.num_ents, host->dma.dir);

	if (n != host->dma.num_ents) {
		pr_err("%s: Unable to map in all sg elements\n",
		       mmc_hostname(host->mmc));
		host->dma.sg = NULL;
		host->dma.num_ents = 0;
		return -ENOMEM;
	}

	box = &nc->cmd[0];
	for (i = 0; i < host->dma.num_ents; i++) {
		box->cmd = CMD_MODE_BOX;

		if (i == (host->dma.num_ents - 1))
			box->cmd |= CMD_LC;
		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
			(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
			(sg_dma_len(sg) / MCI_FIFOSIZE);

		if (data->flags & MMC_DATA_READ) {
			box->src_row_addr = msmsdcc_fifo_addr(host);
			box->dst_row_addr = sg_dma_address(sg);

			box->src_dst_len = (MCI_FIFOSIZE << 16) |
					   (MCI_FIFOSIZE);
			box->row_offset = MCI_FIFOSIZE;

			box->num_rows = rows * ((1 << 16) + 1);
			box->cmd |= CMD_SRC_CRCI(crci);
		} else {
			box->src_row_addr = sg_dma_address(sg);
			box->dst_row_addr = msmsdcc_fifo_addr(host);

			box->src_dst_len = (MCI_FIFOSIZE << 16) |
					   (MCI_FIFOSIZE);
			box->row_offset = (MCI_FIFOSIZE << 16);

			box->num_rows = rows * ((1 << 16) + 1);
			box->cmd |= CMD_DST_CRCI(crci);
		}
		box++;
		sg++;
	}

	/* location of command block must be 64 bit aligned */
	BUG_ON(host->dma.cmd_busaddr & 0x07);

	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;

	return 0;
}

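/*
 * Program the data path for a transfer: set the data timeout and length,
 * then use DMA if the transfer qualifies, falling back to PIO otherwise.
 */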
static void
msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout;
	unsigned long long clks;
	void __iomem *base = host->base;
	unsigned int pio_irqmask = 0;

	host->curr.data = data;
	host->curr.xfer_size = data->blksz * data->blocks;
	host->curr.xfer_remain = host->curr.xfer_size;
	host->curr.data_xfered = 0;
	host->curr.got_dataend = 0;
	host->curr.got_datablkend = 0;

	memset(&host->pio, 0, sizeof(host->pio));

	clks = (unsigned long long)data->timeout_ns * host->clk_rate;
	do_div(clks, 1000000000UL);
	timeout = data->timeout_clks + (unsigned int)clks;
	writel(timeout, base + MMCIDATATIMER);

	writel(host->curr.xfer_size, base + MMCIDATALENGTH);

	datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);

	if (!msmsdcc_config_dma(host, data))
		datactrl |= MCI_DPSM_DMAENABLE;
	else {
		host->pio.sg = data->sg;
		host->pio.sg_len = data->sg_len;
		host->pio.sg_off = 0;

		if (data->flags & MMC_DATA_READ) {
			pio_irqmask = MCI_RXFIFOHALFFULLMASK;
			if (host->curr.xfer_remain < MCI_FIFOSIZE)
				pio_irqmask |= MCI_RXDATAAVLBLMASK;
		} else
			pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	writel(pio_irqmask, base + MMCIMASK1);
	writel(datactrl, base + MMCIDATACTRL);

	if (datactrl & MCI_DPSM_DMAENABLE) {
		host->dma.busy = 1;
		msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
	}
}

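/* Write a command (and its flags) into the command processor state machine. */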
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(2 + ((5 * 1000000) / host->clk_rate));
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}

	if ((((cmd->opcode == 17) || (cmd->opcode == 18)) ||
	     ((cmd->opcode == 24) || (cmd->opcode == 25))) ||
	    (cmd->opcode == 53))
		c |= MCI_CSPM_DATCMD;

	if (cmd == cmd->mrq->stop)
		c |= MCI_CSPM_MCIABORT;

	host->curr.cmd = cmd;
	host->stats.cmds++;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
		 unsigned int status)
{
	if (status & MCI_DATACRCFAIL) {
		pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
		pr_err("%s: opcode 0x%.8x\n", __func__,
		       data->mrq->cmd->opcode);
		pr_err("%s: blksz %d, blocks %d\n", __func__,
		       data->blksz, data->blocks);
		data->error = -EILSEQ;
	} else if (status & MCI_DATATIMEOUT) {
		pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
		data->error = -ETIMEDOUT;
	} else if (status & MCI_RXOVERRUN) {
		pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
		data->error = -EIO;
	} else if (status & MCI_TXUNDERRUN) {
		pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
		data->error = -EIO;
	} else {
		pr_err("%s: Unknown error (0x%.8x)\n",
		       mmc_hostname(host->mmc), status);
		data->error = -EIO;
	}
}

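/* PIO helpers: drain or fill the controller FIFO one word at a time. */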
static int
msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	uint32_t *ptr = (uint32_t *) buffer;
	int count = 0;

	while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
		*ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
		ptr++;
		count += sizeof(uint32_t);

		remain -= sizeof(uint32_t);
		if (remain == 0)
			break;
	}
	return count;
}

static int
msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
		  unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
						    MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);
		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

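/* Busy-wait (up to maxspin microseconds) for any of the given status bits. */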
static int
msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
{
	while (maxspin) {
		if ((readl(host->base + MMCISTATUS) & mask))
			return 0;
		udelay(1);
		--maxspin;
	}
	return -ETIMEDOUT;
}

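/*
 * PIO interrupt handler: walk the scatterlist, moving data to or from the
 * FIFO until the FIFO stops asserting service requests or the transfer
 * is complete.
 */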
static irqreturn_t
msmsdcc_pio_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;
	void __iomem *base = host->base;
	uint32_t status;

	status = readl(base + MMCISTATUS);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
			if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
				break;

			if (msmsdcc_spin_on_status(host,
						   (MCI_TXFIFOHALFEMPTY |
						    MCI_RXDATAAVLBL),
						   PIO_SPINMAX)) {
				break;
			}
		}

		/* Map the current scatter buffer */
		local_irq_save(flags);
		buffer = kmap_atomic(sg_page(host->pio.sg),
				     KM_BIO_SRC_IRQ) + host->pio.sg->offset;
		buffer += host->pio.sg_off;
		remain = host->pio.sg->length - host->pio.sg_off;
		len = 0;
		if (status & MCI_RXACTIVE)
			len = msmsdcc_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = msmsdcc_pio_write(host, buffer, remain, status);

		/* Unmap the buffer */
		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		local_irq_restore(flags);

		host->pio.sg_off += len;
		host->curr.xfer_remain -= len;
		host->curr.data_xfered += len;
		remain -= len;

		if (remain == 0) {
			/* This sg page is full - do some housekeeping */
			if (status & MCI_RXACTIVE && host->curr.user_pages)
				flush_dcache_page(sg_page(host->pio.sg));

			if (!--host->pio.sg_len) {
				memset(&host->pio, 0, sizeof(host->pio));
				break;
			}

			/* Advance to next sg */
			host->pio.sg++;
			host->pio.sg_off = 0;
		}

		status = readl(base + MMCISTATUS);
	} while (1);

	if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	if (!host->curr.xfer_remain)
		writel(0, base + MMCIMASK1);

	return IRQ_HANDLED;
}

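/* Handle completion (or failure) of the command currently in flight. */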
static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
{
	struct mmc_command *cmd = host->curr.cmd;
	void __iomem *base = host->base;

	host->curr.cmd = NULL;
	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	del_timer(&host->command_timer);
	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL &&
		   cmd->flags & MMC_RSP_CRC) {
		pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->curr.data && host->dma.sg)
			msm_dmov_stop_cmd(host->dma.channel,
					  &host->dma.hdr, 0);
		else if (host->curr.data) { /* Non DMA */
			msmsdcc_stop_data(host);
			msmsdcc_request_end(host, cmd->mrq);
		} else /* host->data == NULL */
			msmsdcc_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ))
		msmsdcc_start_data(host, cmd->data);
}

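/*
 * Main SDCC interrupt handler: loops while status bits remain set,
 * dispatching data errors, data completion, and command completion.
 */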
static irqreturn_t
msmsdcc_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;
	int ret = 0;
	int cardint = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_data *data;

		status = readl(base + MMCISTATUS);
		status &= (readl(base + MMCIMASK0) |
			   MCI_DATABLOCKENDMASK);
		writel(status, base + MMCICLEAR);

		data = host->curr.data;
		if (data) {
			/* Check for data errors */
			if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|
				      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
				msmsdcc_data_err(host, data, status);
				host->curr.data_xfered = 0;
				if (host->dma.sg)
					msm_dmov_stop_cmd(host->dma.channel,
							  &host->dma.hdr, 0);
				else {
					msmsdcc_stop_data(host);
					if (!data->stop)
						msmsdcc_request_end(host,
								    data->mrq);
					else
						msmsdcc_start_command(host,
								      data->stop,
								      0);
				}
			}

			/* Check for data done */
			if (!host->curr.got_dataend && (status & MCI_DATAEND))
				host->curr.got_dataend = 1;

			if (!host->curr.got_datablkend &&
			    (status & MCI_DATABLOCKEND)) {
				host->curr.got_datablkend = 1;
			}

			if (host->curr.got_dataend &&
			    host->curr.got_datablkend) {
				/*
				 * If DMA is still in progress, we complete
				 * via the completion handler
				 */
				if (!host->dma.busy) {
					/*
					 * There appears to be an issue in the
					 * controller where if you request a
					 * small block transfer (< fifo size),
					 * you may get your DATAEND/DATABLKEND
					 * irq without the PIO data irq.
					 *
					 * Check to see if there's still data
					 * to be read, and simulate a PIO irq.
					 */
					if (readl(base + MMCISTATUS) &
					    MCI_RXDATAAVLBL)
						msmsdcc_pio_irq(1, host);

					msmsdcc_stop_data(host);
					if (!data->error)
						host->curr.data_xfered =
							host->curr.xfer_size;

					if (!data->stop)
						msmsdcc_request_end(host,
								    data->mrq);
					else
						msmsdcc_start_command(host,
								      data->stop, 0);
				}
			}
		}

		if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
			      MCI_CMDTIMEOUT) && host->curr.cmd) {
			msmsdcc_do_cmdirq(host, status);
		}

		if (status & MCI_SDIOINTOPER) {
			cardint = 1;
			status &= ~MCI_SDIOINTOPER;
		}
		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	/*
	 * We have to delay handling the card interrupt as it calls
	 * back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return IRQ_RETVAL(ret);
}

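/*
 * Issue a request from the MMC core. Reads start data first; the command
 * is then either polled briefly (cmdpoll) or completed from the IRQ path,
 * backed by a one-second command timer.
 */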
static void
msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->curr.mrq != NULL);
	WARN_ON(host->pwr == 0);

	spin_lock_irqsave(&host->lock, flags);

	host->stats.reqs++;

	if (host->eject) {
		if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
			mrq->cmd->error = 0;
			mrq->data->bytes_xfered = mrq->data->blksz *
						  mrq->data->blocks;
		} else
			mrq->cmd->error = -ENOMEDIUM;

		spin_unlock_irqrestore(&host->lock, flags);
		mmc_request_done(mmc, mrq);
		return;
	}

	host->curr.mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		msmsdcc_start_data(host, mrq->data);

	msmsdcc_start_command(host, mrq->cmd, 0);

	if (host->cmdpoll && !msmsdcc_spin_on_status(host,
				MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
				CMD_SPINMAX)) {
		uint32_t status = readl(host->base + MMCISTATUS);
		msmsdcc_do_cmdirq(host, status);
		writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
		       host->base + MMCICLEAR);
		host->stats.cmdpoll_hits++;
	} else {
		host->stats.cmdpoll_misses++;
		mod_timer(&host->command_timer, jiffies + HZ);
	}
	spin_unlock_irqrestore(&host->lock, flags);
}

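/*
 * Apply bus settings from the MMC core: gate the clocks, set the clock
 * rate, bus width and power mode, and update the power register.
 */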
static void
msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	u32 clk = 0, pwr = 0;
	int rc;

	if (ios->clock) {
		if (!host->clks_on) {
			clk_enable(host->pclk);
			clk_enable(host->clk);
			host->clks_on = 1;
		}
		if (ios->clock != host->clk_rate) {
			rc = clk_set_rate(host->clk, ios->clock);
			if (rc < 0)
				pr_err("%s: Error setting clock rate (%d)\n",
				       mmc_hostname(host->mmc), rc);
			else
				host->clk_rate = ios->clock;
		}
		clk |= MCI_CLK_ENABLE;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		clk |= (2 << 10); /* Set WIDEBUS */

	if (ios->clock > 400000 && msmsdcc_pwrsave)
		clk |= (1 << 9); /* PWRSAVE */

	clk |= (1 << 12); /* FLOW_ENA */
	clk |= (1 << 15); /* feedback clock */

	if (host->plat->translate_vdd)
		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		htc_pwrsink_set(PWRSINK_SDCARD, 0);
		break;
	case MMC_POWER_UP:
		pwr |= MCI_PWR_UP;
		break;
	case MMC_POWER_ON:
		htc_pwrsink_set(PWRSINK_SDCARD, 100);
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		pwr |= MCI_OD;

	writel(clk, host->base + MMCICLOCK);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
		clk_disable(host->clk);
		clk_disable(host->pclk);
		host->clks_on = 0;
	}
}

static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&host->lock, flags);
	if (msmsdcc_sdioirq == 1) {
		status = readl(host->base + MMCIMASK0);
		if (enable)
			status |= MCI_SDIOINTOPERMASK;
		else
			status &= ~MCI_SDIOINTOPERMASK;
		host->saved_irq0mask = status;
		writel(status, host->base + MMCIMASK0);
	}
	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops msmsdcc_ops = {
	.request	= msmsdcc_request,
	.set_ios	= msmsdcc_set_ios,
	.enable_sdio_irq = msmsdcc_enable_sdio_irq,
};

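/*
 * Card-detect handling: poll or react to the platform status callback and
 * notify the MMC core when the slot state changes.
 */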
static void
msmsdcc_check_status(unsigned long data)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
	unsigned int status;

	if (!host->plat->status) {
		mmc_detect_change(host->mmc, 0);
		goto out;
	}

	status = host->plat->status(mmc_dev(host->mmc));
	host->eject = !status;
	if (status ^ host->oldstat) {
		pr_info("%s: Slot status change detected (%d -> %d)\n",
			mmc_hostname(host->mmc), host->oldstat, status);
		if (status)
			mmc_detect_change(host->mmc, (5 * HZ) / 2);
		else
			mmc_detect_change(host->mmc, 0);
	}

	host->oldstat = status;

out:
	if (host->timer.function)
		mod_timer(&host->timer, jiffies + HZ);
}

static irqreturn_t
msmsdcc_platform_status_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;

	printk(KERN_DEBUG "%s: %d\n", __func__, irq);
	msmsdcc_check_status((unsigned long) host);
	return IRQ_HANDLED;
}

static void
msmsdcc_status_notify_cb(int card_present, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;

	printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
	       card_present);
	msmsdcc_check_status((unsigned long) host);
}

/*
 * Called when a command times out: dump some debugging information and
 * then error out the transaction.
 */
static void
msmsdcc_command_expired(unsigned long _data)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;

	if (!mrq) {
		pr_info("%s: Command expiry misfire\n",
			mmc_hostname(host->mmc));
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	pr_err("%s: Command timeout (%p %p %p %p)\n",
	       mmc_hostname(host->mmc), mrq, mrq->cmd,
	       mrq->data, host->dma.sg);

	mrq->cmd->error = -ETIMEDOUT;
	msmsdcc_stop_data(host);

	writel(0, host->base + MMCICOMMAND);

	host->curr.mrq = NULL;
	host->curr.cmd = NULL;

	spin_unlock_irqrestore(&host->lock, flags);
	mmc_request_done(host->mmc, mrq);
}

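/*
 * Allocate the non-cached DataMover command block and record the DMA
 * channel from the platform resources.
 */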
static int
msmsdcc_init_dma(struct msmsdcc_host *host)
{
	memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
	host->dma.host = host;
	host->dma.channel = -1;

	if (!host->dmares)
		return -ENODEV;

	host->dma.nc = dma_alloc_coherent(NULL,
					  sizeof(struct msmsdcc_nc_dmadata),
					  &host->dma.nc_busaddr,
					  GFP_KERNEL);
	if (host->dma.nc == NULL) {
		pr_err("Unable to allocate DMA buffer\n");
		return -ENOMEM;
	}
	memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
	host->dma.cmd_busaddr = host->dma.nc_busaddr;
	host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
				   offsetof(struct msmsdcc_nc_dmadata, cmdptr);
	host->dma.channel = host->dmares->start;

	return 0;
}

#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
static void
do_resume_work(struct work_struct *work)
{
	struct msmsdcc_host *host =
		container_of(work, struct msmsdcc_host, resume_task);
	struct mmc_host *mmc = host->mmc;

	if (mmc) {
		mmc_resume_host(mmc);
		if (host->stat_irq)
			enable_irq(host->stat_irq);
	}
}
#endif

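/*
 * Probe: map the controller, set up DMA, clocks, card detect and IRQs,
 * then register the host with the MMC core.
 */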
static int
msmsdcc_probe(struct platform_device *pdev)
{
	struct mmc_platform_data *plat = pdev->dev.platform_data;
	struct msmsdcc_host *host;
	struct mmc_host *mmc;
	struct resource *cmd_irqres = NULL;
	struct resource *pio_irqres = NULL;
	struct resource *stat_irqres = NULL;
	struct resource *memres = NULL;
	struct resource *dmares = NULL;
	int ret;

	/* must have platform data */
	if (!plat) {
		pr_err("%s: Platform data not available\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	if (pdev->id < 1 || pdev->id > 4)
		return -EINVAL;

	if (pdev->resource == NULL || pdev->num_resources < 2) {
		pr_err("%s: Invalid resource\n", __func__);
		return -ENXIO;
	}

	memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						  "cmd_irq");
	pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						  "pio_irq");
	stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						   "status_irq");

	if (!cmd_irqres || !pio_irqres || !memres) {
		pr_err("%s: Invalid resource\n", __func__);
		return -ENXIO;
	}

	/*
	 * Setup our host structure
	 */
	mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	host = mmc_priv(mmc);
	host->pdev_id = pdev->id;
	host->plat = plat;
	host->mmc = mmc;

	host->cmdpoll = 1;

	host->base = ioremap(memres->start, PAGE_SIZE);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	host->cmd_irqres = cmd_irqres;
	host->pio_irqres = pio_irqres;
	host->memres = memres;
	host->dmares = dmares;
	spin_lock_init(&host->lock);

	/*
	 * Setup DMA
	 */
	msmsdcc_init_dma(host);

	/*
	 * Setup main peripheral bus clock
	 */
	host->pclk = clk_get(&pdev->dev, "sdc_pclk");
	if (IS_ERR(host->pclk)) {
		ret = PTR_ERR(host->pclk);
		goto host_free;
	}

	ret = clk_enable(host->pclk);
	if (ret)
		goto pclk_put;

	host->pclk_rate = clk_get_rate(host->pclk);

	/*
	 * Setup SDC MMC clock
	 */
	host->clk = clk_get(&pdev->dev, "sdc_clk");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto pclk_disable;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_put;

	ret = clk_set_rate(host->clk, msmsdcc_fmin);
	if (ret) {
		pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
		goto clk_disable;
	}

	host->clk_rate = clk_get_rate(host->clk);

	host->clks_on = 1;

	/*
	 * Setup MMC host structure
	 */
	mmc->ops = &msmsdcc_ops;
	mmc->f_min = msmsdcc_fmin;
	mmc->f_max = msmsdcc_fmax;
	mmc->ocr_avail = plat->ocr_mask;

	if (msmsdcc_4bit)
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	if (msmsdcc_sdioirq)
		mmc->caps |= MMC_CAP_SDIO_IRQ;
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;

	mmc->max_phys_segs = NR_SG;
	mmc->max_hw_segs = NR_SG;
	mmc->max_blk_size = 4096;	/* MCI_DATA_CTL BLOCKSIZE up to 4096 */
	mmc->max_blk_count = 65536;

	mmc->max_req_size = 33554432;	/* MCI_DATA_LENGTH is 25 bits */
	mmc->max_seg_size = mmc->max_req_size;

	writel(0, host->base + MMCIMASK0);
	writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
	host->saved_irq0mask = MCI_IRQENABLE;

	/*
	 * Setup card detect change
	 */
	memset(&host->timer, 0, sizeof(host->timer));

	if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
		unsigned long irqflags = IRQF_SHARED |
			(stat_irqres->flags & IRQF_TRIGGER_MASK);

		host->stat_irq = stat_irqres->start;
		ret = request_irq(host->stat_irq,
				  msmsdcc_platform_status_irq,
				  irqflags,
				  DRIVER_NAME " (slot)",
				  host);
		if (ret) {
			pr_err("%s: Unable to get slot IRQ %d (%d)\n",
			       mmc_hostname(mmc), host->stat_irq, ret);
			goto clk_disable;
		}
	} else if (plat->register_status_notify) {
		plat->register_status_notify(msmsdcc_status_notify_cb, host);
	} else if (!plat->status)
		pr_err("%s: No card detect facilities available\n",
		       mmc_hostname(mmc));
	else {
		init_timer(&host->timer);
		host->timer.data = (unsigned long)host;
		host->timer.function = msmsdcc_check_status;
		host->timer.expires = jiffies + HZ;
		add_timer(&host->timer);
	}

	if (plat->status) {
		host->oldstat = host->plat->status(mmc_dev(host->mmc));
		host->eject = !host->oldstat;
	}

	/*
	 * Setup a command timer. We currently need this due to
	 * some 'strange' timeout / error handling situations.
	 */
	init_timer(&host->command_timer);
	host->command_timer.data = (unsigned long) host;
	host->command_timer.function = msmsdcc_command_expired;

	ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
			  DRIVER_NAME " (cmd)", host);
	if (ret)
		goto stat_irq_free;

	ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
			  DRIVER_NAME " (pio)", host);
	if (ret)
		goto cmd_irq_free;

	mmc_set_drvdata(pdev, mmc);
	mmc_add_host(mmc);

	pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
		mmc_hostname(mmc), (unsigned long long)memres->start,
		(unsigned int) cmd_irqres->start,
		(unsigned int) host->stat_irq, host->dma.channel);
	pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
		(mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
	pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
		mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
	pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
	pr_info("%s: Power save feature enable = %d\n",
		mmc_hostname(mmc), msmsdcc_pwrsave);
	if (host->dma.channel != -1) {
		pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
			mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
		pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
			mmc_hostname(mmc), host->dma.cmd_busaddr,
			host->dma.cmdptr_busaddr);
	} else
		pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
	if (host->timer.function)
		pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));

	return 0;

 cmd_irq_free:
	free_irq(cmd_irqres->start, host);
 stat_irq_free:
	if (host->stat_irq)
		free_irq(host->stat_irq, host);
 clk_disable:
	clk_disable(host->clk);
 clk_put:
	clk_put(host->clk);
 pclk_disable:
	clk_disable(host->pclk);
 pclk_put:
	clk_put(host->pclk);
 host_free:
	mmc_free_host(mmc);
 out:
	return ret;
}

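/* Suspend/resume: mask interrupts and gate the clocks while suspended. */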
static int
msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = mmc_get_drvdata(dev);
	int rc = 0;

	if (mmc) {
		struct msmsdcc_host *host = mmc_priv(mmc);

		if (host->stat_irq)
			disable_irq(host->stat_irq);

		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
			rc = mmc_suspend_host(mmc, state);
		if (!rc) {
			writel(0, host->base + MMCIMASK0);

			if (host->clks_on) {
				clk_disable(host->clk);
				clk_disable(host->pclk);
				host->clks_on = 0;
			}
		}
	}
	return rc;
}

static int
msmsdcc_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = mmc_get_drvdata(dev);
	unsigned long flags;

	if (mmc) {
		struct msmsdcc_host *host = mmc_priv(mmc);

		spin_lock_irqsave(&host->lock, flags);

		if (!host->clks_on) {
			clk_enable(host->pclk);
			clk_enable(host->clk);
			host->clks_on = 1;
		}

		writel(host->saved_irq0mask, host->base + MMCIMASK0);

		spin_unlock_irqrestore(&host->lock, flags);

		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
			mmc_resume_host(mmc);
		if (host->stat_irq)
			enable_irq(host->stat_irq);
	}
	return 0;
}

static struct platform_driver msmsdcc_driver = {
	.probe		= msmsdcc_probe,
	.suspend	= msmsdcc_suspend,
	.resume		= msmsdcc_resume,
	.driver		= {
		.name	= "msm_sdcc",
	},
};

static int __init msmsdcc_init(void)
{
	return platform_driver_register(&msmsdcc_driver);
}

static void __exit msmsdcc_exit(void)
{
	platform_driver_unregister(&msmsdcc_driver);
}

module_init(msmsdcc_init);
module_exit(msmsdcc_exit);

MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
MODULE_LICENSE("GPL");