/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

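/*
 * Default maximum operating frequency in Hz. A platform may override it
 * via platform data in mmci_probe(), and it is also exposed read-only
 * as the "fmax" module parameter at the bottom of this file.
 */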
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size occupies bits 16..30 of the
 *		      datactrl register
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
};

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}

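/*
 * Worked example for the ST divider above: with mclk = 100 MHz and a
 * desired rate of 400 kHz, clkdiv = DIV_ROUND_UP(100000000, 400000) - 2
 * = 248, so cclk = 100000000 / (248 + 2) = exactly 400 kHz.
 */
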
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	pm_runtime_put(mmc_dev(host->mmc));
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

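/*
 * When the block is wired up with a single combined IRQ line
 * (host->singleirq), the PIO mask bits normally routed to IRQ1 are
 * mirrored into MASK0 so that every event arrives on IRQ0; see the
 * matching demultiplexing in mmci_irq().
 */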
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally; however, if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g. if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used in __devinit or __devexit, so inline it
 * so that it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
	return -ENOMEM;
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire the next DMA request. When that happens, the data
	 * end interrupt is handled in mmci_data_irq().
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

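/*
 * mmci_pre_request()/mmci_post_request() implement the MMC core's
 * asynchronous request preparation: the next transfer's scatterlist is
 * DMA-mapped while the current transfer is still in flight, and the
 * cookie in host->next_data ties a prepared descriptor to its request.
 */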
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* if configured for DMA */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* if configured for DMA */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (err || data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

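	/*
	 * blksz is a power of two (enforced in mmci_request()), so
	 * ffs(blksz) - 1 is its base-2 logarithm for the datactrl field.
	 */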
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/*
	 * Attempt to use DMA operation mode; should this
	 * fail, fall back to PIO mode.
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}

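	/*
	 * The CPSM interrupt bit is never used here; the condition is
	 * deliberately hard-coded to 0, so this branch compiles away.
	 */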
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
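		/*
		 * MMCIFIFOCNT counts the 32-bit words still to be
		 * transferred; subtracting them from the bytes the host
		 * still expects gives what is already sitting in the
		 * FIFO and can be read out now.
		 */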
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * The ST Micro variant for SDIO transfer sizes
		 * less than 8 bytes should have clock H/W flow
		 * control disabled.
		 */
		if (variant->sdio &&
		    mmc_card_sdio(host->mmc->card)) {
			if (count < 8)
				writel(readl(host->base + MMCICLOCK) &
				       ~variant->clkreg_enable,
				       host->base + MMCICLOCK);
			else
				writel(readl(host->base + MMCICLOCK) |
				       variant->clkreg_enable,
				       host->base + MMCICLOCK);
		}

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32-bit write, 7 bytes will be two
		 * 32-bit writes etc.
		 */
		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

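	/*
	 * For reads, arm the data path before issuing the command so
	 * the DPSM is ready when the card starts returning data;
	 * writes are started from mmci_cmd_irq() once the command has
	 * completed.
	 */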
	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct returns void, and failing to set the
				 * power should be rare, so we print an error
				 * and return here.
				 */
				return;
			}
		}
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev,
				const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
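
	/*
	 * The 257 and 512 divisors correspond to the maximum clkdiv of
	 * 255 in each equation in mmci_set_clkreg(): mclk / (255 + 2)
	 * for the ST variant and mclk / (2 * (255 + 1)) for the ARM
	 * original.
	 */
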
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
					 "Provided ocr_mask/setpower will not be used "
					 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	/*
	 * We can do scatter/gather I/O.
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

irq0_free:
	free_irq(dev->irq[0], host);
unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
err_gpio_cd:
	iounmap(host->base);
clk_disable:
	clk_disable(host->clk);
clk_free:
	clk_put(host->clk);
host_free:
	mmc_free_host(mmc);
rel_regions:
	amba_release_regions(dev);
out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

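/*
 * System PM: suspend masks all interrupts once the MMC core has
 * suspended the card; resume restores the default interrupt enables
 * before handing the host back to the core.
 */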
#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);

module_param(fmax, uint, 0444);
MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");