sh_mmcif.c

/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1-bit bus */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4-bit bus */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8-bit bus */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE	(1 << 29)
#define INT_CMD12DRE	(1 << 26)
#define INT_CMD12RBE	(1 << 25)
#define INT_CMD12CRE	(1 << 24)
#define INT_DTRANE	(1 << 23)
#define INT_BUFRE	(1 << 22)
#define INT_BUFWEN	(1 << 21)
#define INT_BUFREN	(1 << 20)
#define INT_CCSRCV	(1 << 19)
#define INT_RBSYE	(1 << 17)
#define INT_CRSPE	(1 << 16)
#define INT_CMDVIO	(1 << 15)
#define INT_BUFVIO	(1 << 14)
#define INT_WDATERR	(1 << 11)
#define INT_RDATERR	(1 << 10)
#define INT_RIDXERR	(1 << 9)
#define INT_RSPERR	(1 << 8)
#define INT_CCSTO	(1 << 5)
#define INT_CRCSTO	(1 << 4)
#define INT_WDATTO	(1 << 3)
#define INT_RDATTO	(1 << 2)
#define INT_RBSYTO	(1 << 1)
#define INT_RSPTO	(1 << 0)
#define INT_ERR_STS	(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
			 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
			 INT_CCSTO | INT_CRCSTO | INT_WDATTO |    \
			 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

/* CE_INT_MASK */
#define MASK_ALL	0x00000000
#define MASK_MCCSDE	(1 << 29)
#define MASK_MCMD12DRE	(1 << 26)
#define MASK_MCMD12RBE	(1 << 25)
#define MASK_MCMD12CRE	(1 << 24)
#define MASK_MDTRANE	(1 << 23)
#define MASK_MBUFRE	(1 << 22)
#define MASK_MBUFWEN	(1 << 21)
#define MASK_MBUFREN	(1 << 20)
#define MASK_MCCSRCV	(1 << 19)
#define MASK_MRBSYE	(1 << 17)
#define MASK_MCRSPE	(1 << 16)
#define MASK_MCMDVIO	(1 << 15)
#define MASK_MBUFVIO	(1 << 14)
#define MASK_MWDATERR	(1 << 11)
#define MASK_MRDATERR	(1 << 10)
#define MASK_MRIDXERR	(1 << 9)
#define MASK_MRSPERR	(1 << 8)
#define MASK_MCCSTO	(1 << 5)
#define MASK_MCRCSTO	(1 << 4)
#define MASK_MWDATTO	(1 << 3)
#define MASK_MRDATTO	(1 << 2)
#define MASK_MRBSYTO	(1 << 1)
#define MASK_MRSPTO	(1 << 0)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ	(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE	(1 << 31)
#define STS2_CRC16E	(1 << 30)
#define STS2_AC12CRCE	(1 << 29)
#define STS2_RSPCRC7E	(1 << 28)
#define STS2_CRCSTEBE	(1 << 27)
#define STS2_RDATEBE	(1 << 26)
#define STS2_AC12REBE	(1 << 25)
#define STS2_RSPEBE	(1 << 24)
#define STS2_AC12IDXE	(1 << 23)
#define STS2_RSPIDXE	(1 << 22)
#define STS2_CCSTO	(1 << 15)
#define STS2_RDATTO	(1 << 14)
#define STS2_DATBSYTO	(1 << 13)
#define STS2_CRCSTTO	(1 << 12)
#define STS2_AC12BSYTO	(1 << 11)
#define STS2_RSPBSYTO	(1 << 10)
#define STS2_AC12RSPTO	(1 << 9)
#define STS2_RSPTO	(1 << 8)
#define STS2_CRC_ERR	(STS2_CRCSTE | STS2_CRC16E |		\
			 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |	\
				 STS2_DATBSYTO | STS2_CRCSTTO |	\
				 STS2_AC12BSYTO | STS2_RSPBSYTO | \
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum mmcif_state {
        STATE_IDLE,
        STATE_REQUEST,
        STATE_IOS,
};

struct sh_mmcif_host {
        struct mmc_host *mmc;
        struct mmc_data *data;
        struct platform_device *pd;
        struct sh_dmae_slave dma_slave_tx;
        struct sh_dmae_slave dma_slave_rx;
        struct clk *hclk;
        unsigned int clk;
        int bus_width;
        bool sd_error;
        long timeout;
        void __iomem *addr;
        struct completion intr_wait;
        enum mmcif_state state;
        spinlock_t lock;
        bool power;
        bool card_present;

        /* DMA support */
        struct dma_chan *chan_rx;
        struct dma_chan *chan_tx;
        struct completion dma_complete;
        bool dma_active;
};
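
/*
 * Read-modify-write helpers: set or clear bits in an MMCIF control
 * register. Callers must not race for the same register.
 */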
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
                                   unsigned int reg, u32 val)
{
        writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
                                   unsigned int reg, u32 val)
{
        writel(~val & readl(host->addr + reg), host->addr + reg);
}
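
/*
 * DMA transfer-end callback, invoked from the dmaengine tasklet: unmap
 * the scatterlist and wake the waiter in sh_mmcif_start_cmd().
 */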
static void mmcif_dma_complete(void *arg)
{
        struct sh_mmcif_host *host = arg;

        dev_dbg(&host->pd->dev, "Command completed\n");

        if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
                 dev_name(&host->pd->dev)))
                return;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->data->sg, host->data->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->data->sg, host->data->sg_len,
                             DMA_TO_DEVICE);

        complete(&host->dma_complete);
}
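
/*
 * Map the request's scatterlist and submit a slave-DMA descriptor for a
 * read (below) or write (sh_mmcif_start_dma_tx) transfer. On any failure
 * both channels are released and the driver falls back to PIO.
 */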
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
        struct scatterlist *sg = host->data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        dma_cookie_t cookie = -EINVAL;
        int ret;

        ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
                         DMA_FROM_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = mmcif_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
                sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
                dma_async_issue_pending(chan);
        }
        dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
                __func__, host->data->sg_len, ret, cookie);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                host->dma_active = false;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pd->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        }

        dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, cookie, host->data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
        struct scatterlist *sg = host->data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        dma_cookie_t cookie = -EINVAL;
        int ret;

        ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
                         DMA_TO_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = mmcif_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
                sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
                dma_async_issue_pending(chan);
        }
        dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
                __func__, host->data->sg_len, ret, cookie);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                host->dma_active = false;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pd->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        }

        dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, cookie);
}

static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
{
        dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);

        chan->private = arg;
        return true;
}
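
/*
 * Acquire a Tx/Rx DMA channel pair, described either by the legacy
 * chan_priv_{tx,rx} platform data or by plain slave IDs. DMA is used
 * only if both channels can be obtained.
 */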
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
                                 struct sh_mmcif_plat_data *pdata)
{
        struct sh_dmae_slave *tx, *rx;

        host->dma_active = false;

        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (pdata->dma) {
                dev_warn(&host->pd->dev,
                         "Update your platform to use embedded DMA slave IDs\n");
                tx = &pdata->dma->chan_priv_tx;
                rx = &pdata->dma->chan_priv_rx;
        } else {
                tx = &host->dma_slave_tx;
                tx->slave_id = pdata->slave_id_tx;
                rx = &host->dma_slave_rx;
                rx->slave_id = pdata->slave_id_rx;
        }

        if (tx->slave_id > 0 && rx->slave_id > 0) {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx);
                dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx);
                dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx) {
                        dma_release_channel(host->chan_tx);
                        host->chan_tx = NULL;
                        return;
                }

                init_completion(&host->dma_complete);
        }
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);

        /* Descriptors are freed automatically */
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }

        host->dma_active = false;
}
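
/*
 * Program the MMC bus clock: either pass the peripheral clock through
 * unchanged (CLK_SUP_PCLK) or write a power-of-two divider, derived
 * from the ratio of the parent clock to the requested rate, into the
 * CLK_CTRL divider field.
 */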
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

        sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
        sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

        if (!clk)
                return;
        if (p->sup_pclk && clk == host->clk)
                sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
        else
                sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
                        (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));

        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
        u32 tmp;

        tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

        sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
        sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
                SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
        /* byte swap on */
        sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}
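
/*
 * Recover from an error interrupt: if a command sequence is still in
 * progress, break it off and soft-reset the interface, then translate
 * the HOST_STS2 error bits into an errno for the MMC core.
 */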
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
        u32 state1, state2;
        int ret, timeout = 10000000;

        host->sd_error = false;

        state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
        state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
        dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
        dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

        if (state1 & STS1_CMDSEQ) {
                sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
                sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
                while (1) {
                        timeout--;
                        if (timeout < 0) {
                                dev_err(&host->pd->dev,
                                        "Forced end of command sequence timed out\n");
                                return -EIO;
                        }
                        if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
                                                                & STS1_CMDSEQ))
                                break;
                        mdelay(1);
                }
                sh_mmcif_sync_reset(host);
                dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
                return -EIO;
        }

        if (state2 & STS2_CRC_ERR) {
                dev_dbg(&host->pd->dev, "CRC error\n");
                ret = -EIO;
        } else if (state2 & STS2_TIMEOUT_ERR) {
                dev_dbg(&host->pd->dev, "Timeout error\n");
                ret = -ETIMEDOUT;
        } else {
                dev_dbg(&host->pd->dev, "End/Index error\n");
                ret = -EIO;
        }
        return ret;
}
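
/*
 * PIO transfer routines: each arms the relevant buffer-ready interrupt,
 * sleeps on intr_wait until the IRQ handler completes it, then copies
 * one block at a time through the 32-bit CE_DATA window.
 */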
static int sh_mmcif_single_read(struct sh_mmcif_host *host,
                                struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 blocksize, i, *p = sg_virt(data->sg);

        /* buf read enable */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        blocksize = (BLOCK_SIZE_MASK &
                        sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
        for (i = 0; i < blocksize / 4; i++)
                *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

        /* buffer read end */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        return 0;
}

static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
                               struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 blocksize, i, j, sec, *p;

        blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
                                                     MMCIF_CE_BLOCK_SET);
        for (j = 0; j < data->sg_len; j++) {
                p = sg_virt(data->sg);
                for (sec = 0; sec < data->sg->length / blocksize; sec++) {
                        /* buf read enable */
                        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
                        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                                        host->timeout);

                        if (time <= 0 || host->sd_error)
                                return sh_mmcif_error_manage(host);

                        for (i = 0; i < blocksize / 4; i++)
                                *p++ = sh_mmcif_readl(host->addr,
                                                      MMCIF_CE_DATA);
                }
                if (j < data->sg_len - 1)
                        data->sg++;
        }
        return 0;
}

static int sh_mmcif_single_write(struct sh_mmcif_host *host,
                                 struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 blocksize, i, *p = sg_virt(data->sg);

        /* buf write enable */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        blocksize = (BLOCK_SIZE_MASK &
                        sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
        for (i = 0; i < blocksize / 4; i++)
                sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

        /* buffer write end */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        return 0;
}

static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
                                struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 i, sec, j, blocksize, *p;

        blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
                                                     MMCIF_CE_BLOCK_SET);
        for (j = 0; j < data->sg_len; j++) {
                p = sg_virt(data->sg);
                for (sec = 0; sec < data->sg->length / blocksize; sec++) {
                        /* buf write enable */
                        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
                        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                                        host->timeout);

                        if (time <= 0 || host->sd_error)
                                return sh_mmcif_error_manage(host);

                        for (i = 0; i < blocksize / 4; i++)
                                sh_mmcif_writel(host->addr,
                                                MMCIF_CE_DATA, *p++);
                }
                if (j < data->sg_len - 1)
                        data->sg++;
        }
        return 0;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
                                  struct mmc_command *cmd)
{
        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
                cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
                cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
                cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
        } else
                cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
                                       struct mmc_command *cmd)
{
        cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}
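
/*
 * Translate an mmc_command into the CE_CMD_SET register layout: the
 * opcode occupies bits 29:24, with response type, bus width and
 * multi-block/auto-CMD12 flags encoded in the bits below it.
 */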
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
                struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
{
        u32 tmp = 0;

        /* Response Type check */
        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                tmp |= CMD_SET_RTYP_NO;
                break;
        case MMC_RSP_R1:
        case MMC_RSP_R1B:
        case MMC_RSP_R3:
                tmp |= CMD_SET_RTYP_6B;
                break;
        case MMC_RSP_R2:
                tmp |= CMD_SET_RTYP_17B;
                break;
        default:
                dev_err(&host->pd->dev, "Unsupported response type.\n");
                break;
        }
        switch (opc) {
        /* RBSY */
        case MMC_SWITCH:
        case MMC_STOP_TRANSMISSION:
        case MMC_SET_WRITE_PROT:
        case MMC_CLR_WRITE_PROT:
        case MMC_ERASE:
        case MMC_GEN_CMD:
                tmp |= CMD_SET_RBSY;
                break;
        }
        /* WDAT / DATW */
        if (host->data) {
                tmp |= CMD_SET_WDAT;
                switch (host->bus_width) {
                case MMC_BUS_WIDTH_1:
                        tmp |= CMD_SET_DATW_1;
                        break;
                case MMC_BUS_WIDTH_4:
                        tmp |= CMD_SET_DATW_4;
                        break;
                case MMC_BUS_WIDTH_8:
                        tmp |= CMD_SET_DATW_8;
                        break;
                default:
                        dev_err(&host->pd->dev, "Unsupported bus width.\n");
                        break;
                }
        }
        /* DWEN */
        if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
                tmp |= CMD_SET_DWEN;
        /* CMLTE/CMD12EN */
        if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
                tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
                sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
                                mrq->data->blocks << 16);
        }
        /* RIDXC[1:0] check bits */
        if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
            opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
                tmp |= CMD_SET_RIDXC_BITS;
        /* RCRC7C[1:0] check bits */
        if (opc == MMC_SEND_OP_COND)
                tmp |= CMD_SET_CRC7C_BITS;
        /* RCRC7C[1:0] internal CRC7 */
        if (opc == MMC_ALL_SEND_CID ||
            opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
                tmp |= CMD_SET_CRC7C_INTERNAL;

        return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
                               struct mmc_request *mrq, u32 opc)
{
        int ret;

        switch (opc) {
        case MMC_READ_MULTIPLE_BLOCK:
                ret = sh_mmcif_multi_read(host, mrq);
                break;
        case MMC_WRITE_MULTIPLE_BLOCK:
                ret = sh_mmcif_multi_write(host, mrq);
                break;
        case MMC_WRITE_BLOCK:
                ret = sh_mmcif_single_write(host, mrq);
                break;
        case MMC_READ_SINGLE_BLOCK:
        case MMC_SEND_EXT_CSD:
                ret = sh_mmcif_single_read(host, mrq);
                break;
        default:
                dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
                ret = -EINVAL;
                break;
        }
        return ret;
}
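
/*
 * Issue a single command: unmask the relevant interrupts, write the
 * argument and command-set registers, then sleep until the IRQ handler
 * signals completion or an error.
 */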
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
                               struct mmc_request *mrq, struct mmc_command *cmd)
{
        long time;
        int ret = 0, mask = 0;
        u32 opc = cmd->opcode;

        switch (opc) {
        /* response busy check */
        case MMC_SWITCH:
        case MMC_STOP_TRANSMISSION:
        case MMC_SET_WRITE_PROT:
        case MMC_CLR_WRITE_PROT:
        case MMC_ERASE:
        case MMC_GEN_CMD:
                mask = MASK_MRBSYE;
                break;
        default:
                mask = MASK_MCRSPE;
                break;
        }
        mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
                MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
                MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
                MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;

        if (host->data) {
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
                                mrq->data->blksz);
        }
        opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);

        sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
        /* set arg */
        sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
        /* set cmd */
        sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0) {
                cmd->error = sh_mmcif_error_manage(host);
                return;
        }
        if (host->sd_error) {
                switch (cmd->opcode) {
                case MMC_ALL_SEND_CID:
                case MMC_SELECT_CARD:
                case MMC_APP_CMD:
                        cmd->error = -ETIMEDOUT;
                        break;
                default:
                        dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
                                cmd->opcode);
                        cmd->error = sh_mmcif_error_manage(host);
                        break;
                }
                host->sd_error = false;
                return;
        }
        if (!(cmd->flags & MMC_RSP_PRESENT)) {
                cmd->error = 0;
                return;
        }
        sh_mmcif_get_response(host, cmd);
        if (host->data) {
                if (!host->dma_active) {
                        ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
                } else {
                        long time =
                                wait_for_completion_interruptible_timeout(&host->dma_complete,
                                                                          host->timeout);
                        if (!time)
                                ret = -ETIMEDOUT;
                        else if (time < 0)
                                ret = time;
                        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
                                        BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
                        host->dma_active = false;
                }
                if (ret < 0)
                        mrq->data->bytes_xfered = 0;
                else
                        mrq->data->bytes_xfered =
                                mrq->data->blocks * mrq->data->blksz;
        }
        cmd->error = ret;
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
                              struct mmc_request *mrq, struct mmc_command *cmd)
{
        long time;

        if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
        else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
        else {
                dev_err(&host->pd->dev, "unsupported stop cmd\n");
                cmd->error = sh_mmcif_error_manage(host);
                return;
        }

        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error) {
                cmd->error = sh_mmcif_error_manage(host);
                return;
        }
        sh_mmcif_get_cmd12response(host, cmd);
        cmd->error = 0;
}
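
/*
 * mmc_host_ops .request handler: rejects SD/SDIO-only opcodes that the
 * MMCIF cannot issue, kicks off DMA when channels are available, and
 * runs the command (plus any stop command) synchronously.
 */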
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                spin_unlock_irqrestore(&host->lock, flags);
                mrq->cmd->error = -EAGAIN;
                mmc_request_done(mmc, mrq);
                return;
        }

        host->state = STATE_REQUEST;
        spin_unlock_irqrestore(&host->lock, flags);

        switch (mrq->cmd->opcode) {
        /* MMCIF does not support SD/SDIO command */
        case SD_IO_SEND_OP_COND:
        case MMC_APP_CMD:
                host->state = STATE_IDLE;
                mrq->cmd->error = -ETIMEDOUT;
                mmc_request_done(mmc, mrq);
                return;
        case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
                if (!mrq->data) {
                        /* send_if_cond cmd (not support) */
                        host->state = STATE_IDLE;
                        mrq->cmd->error = -ETIMEDOUT;
                        mmc_request_done(mmc, mrq);
                        return;
                }
                break;
        default:
                break;
        }
        host->data = mrq->data;
        if (mrq->data) {
                if (mrq->data->flags & MMC_DATA_READ) {
                        if (host->chan_rx)
                                sh_mmcif_start_dma_rx(host);
                } else {
                        if (host->chan_tx)
                                sh_mmcif_start_dma_tx(host);
                }
        }
        sh_mmcif_start_cmd(host, mrq, mrq->cmd);
        host->data = NULL;

        if (!mrq->cmd->error && mrq->stop)
                sh_mmcif_stop_cmd(host, mrq, mrq->stop);
        host->state = STATE_IDLE;
        mmc_request_done(mmc, mrq);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        host->state = STATE_IOS;
        spin_unlock_irqrestore(&host->lock, flags);

        if (ios->power_mode == MMC_POWER_UP) {
                if (!host->card_present) {
                        /* See if we also get DMA */
                        sh_mmcif_request_dma(host, host->pd->dev.platform_data);
                        host->card_present = true;
                }
        } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
                /* clock stop */
                sh_mmcif_clock_control(host, 0);
                if (ios->power_mode == MMC_POWER_OFF) {
                        if (host->card_present) {
                                sh_mmcif_release_dma(host);
                                host->card_present = false;
                        }
                }
                if (host->power) {
                        pm_runtime_put(&host->pd->dev);
                        host->power = false;
                        if (p->down_pwr)
                                p->down_pwr(host->pd);
                }
                host->state = STATE_IDLE;
                return;
        }

        if (ios->clock) {
                if (!host->power) {
                        if (p->set_pwr)
                                p->set_pwr(host->pd, ios->power_mode);
                        pm_runtime_get_sync(&host->pd->dev);
                        host->power = true;
                        sh_mmcif_sync_reset(host);
                }
                sh_mmcif_clock_control(host, ios->clock);
        }

        host->bus_width = ios->bus_width;
        host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

        if (!p->get_cd)
                return -ENOSYS;
        else
                return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
        .request = sh_mmcif_request,
        .set_ios = sh_mmcif_set_ios,
        .get_cd  = sh_mmcif_get_cd,
};

static void sh_mmcif_detect(struct mmc_host *mmc)
{
        mmc_detect_change(mmc, 0);
}
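
/*
 * Interrupt handler, shared by the "error" and "int" IRQ lines: ack and
 * mask the event, flag errors for the waiting thread, and complete
 * intr_wait.
 */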
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
        struct sh_mmcif_host *host = dev_id;
        u32 state;
        int err = 0;

        state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);

        if (state & INT_RBSYE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                                ~(INT_RBSYE | INT_CRSPE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
        } else if (state & INT_CRSPE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
        } else if (state & INT_BUFREN) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
        } else if (state & INT_BUFWEN) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
        } else if (state & INT_CMD12DRE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                                ~(INT_CMD12DRE | INT_CMD12RBE |
                                  INT_CMD12CRE | INT_BUFRE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
        } else if (state & INT_BUFRE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
        } else if (state & INT_DTRANE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
        } else if (state & INT_CMD12RBE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                                ~(INT_CMD12RBE | INT_CMD12CRE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
        } else if (state & INT_ERR_STS) {
                /* err interrupts */
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
                err = 1;
        } else {
                dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
                err = 1;
        }
        if (err) {
                host->sd_error = true;
                dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
        }
        if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
                complete(&host->intr_wait);
        else
                dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);

        return IRQ_HANDLED;
}
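
/*
 * Bind to a platform device: map the register window, enable the
 * interface clock, register an mmc_host and hook up both IRQ lines.
 */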
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
        int ret = 0, irq[2];
        struct mmc_host *mmc;
        struct sh_mmcif_host *host;
        struct sh_mmcif_plat_data *pd;
        struct resource *res;
        void __iomem *reg;
        char clk_name[8];

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);
        if (irq[0] < 0 || irq[1] < 0) {
                dev_err(&pdev->dev, "Get irq error\n");
                return -ENXIO;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "platform_get_resource error.\n");
                return -ENXIO;
        }
        reg = ioremap(res->start, resource_size(res));
        if (!reg) {
                dev_err(&pdev->dev, "ioremap error.\n");
                return -ENOMEM;
        }
        pd = pdev->dev.platform_data;
        if (!pd) {
                dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
                ret = -ENXIO;
                goto clean_up;
        }
        mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto clean_up;
        }
        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->addr = reg;
        host->timeout = 1000;

        snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
        host->hclk = clk_get(&pdev->dev, clk_name);
        if (IS_ERR(host->hclk)) {
                dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
                ret = PTR_ERR(host->hclk);
                goto clean_up1;
        }
        clk_enable(host->hclk);
        host->clk = clk_get_rate(host->hclk);
        host->pd = pdev;

        init_completion(&host->intr_wait);
        spin_lock_init(&host->lock);

        mmc->ops = &sh_mmcif_ops;
        mmc->f_max = host->clk;
        /* close to 400 kHz */
        if (mmc->f_max < 51200000)
                mmc->f_min = mmc->f_max / 128;
        else if (mmc->f_max < 102400000)
                mmc->f_min = mmc->f_max / 256;
        else
                mmc->f_min = mmc->f_max / 512;
        if (pd->ocr)
                mmc->ocr_avail = pd->ocr;
        mmc->caps = MMC_CAP_MMC_HIGHSPEED;
        if (pd->caps)
                mmc->caps |= pd->caps;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
        mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
        mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
        mmc->max_seg_size = mmc->max_req_size;

        sh_mmcif_sync_reset(host);
        platform_set_drvdata(pdev, host);

        pm_runtime_enable(&pdev->dev);
        host->power = false;

        ret = pm_runtime_resume(&pdev->dev);
        if (ret < 0)
                goto clean_up2;

        mmc_add_host(mmc);

        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
        if (ret) {
                dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
                goto clean_up3;
        }
        ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
        if (ret) {
                free_irq(irq[0], host);
                dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
                goto clean_up3;
        }

        sh_mmcif_detect(host->mmc);

        dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
        dev_dbg(&pdev->dev, "chip ver H'%04x\n",
                sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
        return ret;

clean_up3:
        mmc_remove_host(mmc);
        pm_runtime_suspend(&pdev->dev);
clean_up2:
        pm_runtime_disable(&pdev->dev);
        clk_disable(host->hclk);
clean_up1:
        mmc_free_host(mmc);
clean_up:
        if (reg)
                iounmap(reg);
        return ret;
}

static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);
        int irq[2];

        pm_runtime_get_sync(&pdev->dev);

        mmc_remove_host(host->mmc);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        if (host->addr)
                iounmap(host->addr);

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);

        free_irq(irq[0], host);
        free_irq(irq[1], host);

        platform_set_drvdata(pdev, NULL);

        clk_disable(host->hclk);
        mmc_free_host(host->mmc);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);
        int ret = mmc_suspend_host(host->mmc);

        if (!ret) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
                clk_disable(host->hclk);
        }

        return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);

        clk_enable(host->hclk);

        return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend	NULL
#define sh_mmcif_resume		NULL
#endif /* CONFIG_PM */

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
        .suspend = sh_mmcif_suspend,
        .resume = sh_mmcif_resume,
};

static struct platform_driver sh_mmcif_driver = {
        .probe = sh_mmcif_probe,
        .remove = sh_mmcif_remove,
        .driver = {
                .name = DRIVER_NAME,
                .pm = &sh_mmcif_dev_pm_ops,
        },
};

static int __init sh_mmcif_init(void)
{
        return platform_driver_register(&sh_mmcif_driver);
}

static void __exit sh_mmcif_exit(void)
{
        platform_driver_unregister(&sh_mmcif_driver);
}

module_init(sh_mmcif_init);
module_exit(sh_mmcif_exit);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");