dw_mmc.c

/*
 * Synopsys DesignWare Multimedia Card Interface driver
 * (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#ifdef CONFIG_MMC_DW_IDMAC
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

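/*
 * Overview (a summary added for orientation, not from the original
 * sources): the IDMAC is used here in chained descriptor mode. Each
 * descriptor carries a single buffer in des2 (IDMAC_DES0_CH is set), so
 * des3 holds the physical address of the next descriptor rather than a
 * second buffer. dw_mci_idmac_init() below forward-links des3 across the
 * whole ring and marks the last entry with IDMAC_DES0_ER so the
 * controller wraps back to the first descriptor.
 */
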
/**
 * struct dw_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @ctype: Card type for this slot.
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *	&struct dw_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @id: Number of this slot.
 * @last_detect_state: Most recently observed card detect state.
 */
struct dw_mci_slot {
	struct mmc_host		*mmc;
	struct dw_mci		*host;

	u32			ctype;

	struct mmc_request	*mrq;
	struct list_head	queue_node;

	unsigned int		clock;
	unsigned long		flags;
#define DW_MMC_CARD_PRESENT	0
#define DW_MMC_CARD_NEED_INIT	1
	int			id;
	int			last_detect_state;
};

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
	else
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

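/*
 * Illustrative example (not from the original sources): for a
 * single-block read (CMD17, R1 response) the mmc core sets
 * MMC_RSP_PRESENT and MMC_RSP_CRC and attaches a read data segment, so
 * dw_mci_prepare_command() above would return
 *
 *	17 | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
 *	     SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP
 *
 * i.e. wait for any previous data transfer, expect a short CRC-checked
 * response, and expect a read data phase.
 */
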
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	} else {
		/* Data transfer was stopped by the interrupt handler */
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
	}
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

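/*
 * A sketch of the host_cookie handshake (summary, not from the original
 * sources): dw_mci_pre_req() maps the scatterlist ahead of time and
 * stashes the mapped entry count in data->host_cookie. When the request
 * is actually submitted, dw_mci_pre_dma_transfer() is called again with
 * next == false and returns that cached value instead of mapping a
 * second time. dw_mci_post_req() finally unmaps and clears the cookie,
 * so a zero cookie always means "not pre-mapped".
 */
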
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	u32 div;
	u32 clk_en_a;

	if (slot->clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / slot->clock;
		if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;

		dev_info(&slot->mmc->class_dev,
			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
			 slot->id, host->bus_hz, slot->clock,
			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		host->current_speed = slot->clock;
	}

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

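/*
 * Worked example of the divider math above (illustrative numbers, not
 * from the original sources): with bus_hz = 50 MHz and a requested
 * slot->clock of 400 kHz, the first step gives div = 50000000 / 400000
 * = 125 with no remainder, so the +1 correction is skipped. CLKDIV
 * counts in units of two bus cycles, so div = DIV_ROUND_UP(125, 2) = 63
 * and the card sees 50 MHz / (2 * 63) ~= 396.8 kHz, i.e. always at or
 * below the requested rate. A div of 0 bypasses the divider entirely.
 */
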
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= (0x1 << slot->id) << 16;
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);

	if (ios->clock) {
		/*
		 * Use mirror of ios->clock to prevent race with mmc
		 * core ios update when finding the minimum.
		 */
		slot->clock = ios->clock;
	}

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		break;
	default:
		break;
	}
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (brd->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT)
		read_only = 0;
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	if (present)
		dev_dbg(&mmc->class_dev, "card is present\n");
	else
		dev_dbg(&mmc->class_dev, "card is not present\n");

	return present;
}

/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled. This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);

		if (cmd->data) {
			dw_mci_stop_dma(host);
			host->data = NULL;
		}
	}
}

static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	u32 status, ctrl;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			dw_mci_command_complete(host, cmd);
			if (cmd == host->mrq->sbc && !cmd->error) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       host->mrq->cmd);
				goto unlock;
			}

			if (!host->mrq->data || cmd->error) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop)
					send_stop_cmd(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			status = host->data_status;

			if (status & DW_MCI_DATA_ERROR_FLAGS) {
				if (status & SDMMC_INT_DTO) {
					data->error = -ETIMEDOUT;
				} else if (status & SDMMC_INT_DCRC) {
					data->error = -EILSEQ;
				} else if (status & SDMMC_INT_EBE &&
					   host->dir_status ==
							DW_MCI_SEND_STATUS) {
					/*
					 * No data CRC status was returned.
					 * The number of bytes transferred will
					 * be exaggerated in PIO mode.
					 */
					data->bytes_xfered = 0;
					data->error = -ETIMEDOUT;
				} else {
					dev_err(host->dev,
						"data FIFO error (status=%08x)\n",
						status);
					data->error = -EIO;
				}
				/*
				 * After an error, there may be data lingering
				 * in the FIFO, so reset it - doing so
				 * generates a block interrupt, hence setting
				 * the scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;
				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);
			} else {
				data->bytes_xfered = data->blocks * data->blksz;
				data->error = 0;
			}

			if (!data->stop) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			if (host->mrq->sbc && !data->error) {
				data->stop->error = 0;
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_STOP;
			if (!data->error)
				send_stop_cmd(host, data);
			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			host->cmd = NULL;
			dw_mci_command_complete(host, host->mrq->stop);
			dw_mci_request_end(host, host->mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}

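/*
 * State machine summary (a paraphrase of the tasklet above, not from the
 * original sources): a request normally walks STATE_SENDING_CMD ->
 * STATE_SENDING_DATA -> STATE_DATA_BUSY -> STATE_SENDING_STOP ->
 * STATE_IDLE, with each transition gated on an EVENT_* bit set by the
 * interrupt handler. Data errors divert through STATE_DATA_ERROR, which
 * waits for the transfer to wind down before the request is completed.
 */
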
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

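/*
 * Why part_buf exists (summary, not from the original sources): the data
 * FIFO is only accessible in full host words of 1 << data_shift bytes
 * (2, 4 or 8), but a scatterlist segment may start or end on an odd byte
 * count. The helpers above stage those leftover bytes in host->part_buf
 * so they can be combined with the first bytes of the next segment, or
 * flushed on their own when the final segment ends short of a full word.
 */
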
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (!sg_next(host->sg) || host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		if (!sg_next(host->sg))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (!sg_next(host->sg) || host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		if (!sg_next(host->sg))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (!sg_next(host->sg) || host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		if (!sg_next(host->sg))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

static void dw_mci_read_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int nbytes = 0, len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->__sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			offset += len;
			nbytes += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	} while (status & SDMMC_INT_RXDR); /* if the RXDR is ready read again */
	data->bytes_xfered += nbytes;

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	data->bytes_xfered += nbytes;
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int nbytes = 0, len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->__sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			offset += len;
			nbytes += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
	data->bytes_xfered += nbytes;

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	data->bytes_xfered += nbytes;
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

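/*
 * Note on the fcnt calculations (summary, not from the original
 * sources): SDMMC_GET_FCNT() extracts the FIFO fill level in host words
 * from the STATUS register. The read path converts that to bytes
 * (<< shift) and adds the bytes already staged in part_buf, since those
 * can be consumed too; the write path does the mirror-image computation
 * on the remaining free space, fifo_depth minus the fill level, and
 * subtracts the staged bytes that still have to go out first.
 */
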
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	unsigned int pass_count = 0;
	int i;

	do {
		pending = mci_readl(host, MINTSTS); /* read-only mask reg */

		/*
		 * DTO fix - version 2.10a and below, and only if internal DMA
		 * is configured.
		 */
		if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
			if (!pending &&
			    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
				pending |= SDMMC_INT_DATA_OVER;
		}

		if (!pending)
			break;

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	} while (pass_count++ < 5);

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
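
/*
 * Card detect work.  For each slot whose presence state has changed, this
 * powers the slot up or down, fails any request that was in flight with
 * -ENOMEDIUM, resets the FIFO (and the IDMAC, when configured), and finally
 * notifies the MMC core via mmc_detect_change().
 */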
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;
		u32 ctrl;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			/* Power up slot (before spin_lock, may sleep) */
			if (present != 0 && host->pdata->setpower)
				host->pdata->setpower(slot->id, mmc->ocr_avail);

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Mark card as present if applicable */
			if (present != 0)
				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						if (!mrq->stop)
							break;
						/* fall through */
					case STATE_SENDING_STOP:
						mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

				/*
				 * Clear down the FIFO - doing so generates a
				 * block interrupt, hence setting the
				 * scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;

				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);

#ifdef CONFIG_MMC_DW_IDMAC
				ctrl = mci_readl(host, BMOD);
				/* Software reset of DMA */
				ctrl |= SDMMC_IDMAC_SWRESET;
				mci_writel(host, BMOD, ctrl);
#endif
			}

			spin_unlock_bh(&host->lock);

			/* Power down slot (after spin_unlock, may sleep) */
			if (present == 0 && host->pdata->setpower)
				host->pdata->setpower(slot->id, 0);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}

#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}

/* find out bus-width for a given slot */
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	u32 bus_wd = 1;

	if (!np)
		return 1;

	if (of_property_read_u32(np, "bus-width", &bus_wd))
		dev_err(dev, "bus-width property not found, assuming width as 1\n");

	return bus_wd;
}
#else /* CONFIG_OF */
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	return 1;
}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
#endif /* CONFIG_OF */
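
/*
 * Allocate and register one mmc_host for the given slot: derive the clock
 * range from bus_hz, merge capabilities from platform data and driver data,
 * pick a bus width (platform callback, then device tree, then 1 bit), and
 * set block/segment limits depending on whether the internal DMA controller
 * is used.
 */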
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u8 bus_width;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	mmc->ops = &dw_mci_ops;
	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
	mmc->f_max = host->bus_hz;

	if (host->pdata->get_ocr)
		mmc->ocr_avail = host->pdata->get_ocr(id);
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * Start with slot power disabled, it will be enabled when a card
	 * is detected.
	 */
	if (host->pdata->setpower)
		host->pdata->setpower(id, 0);

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	if (host->pdata->get_bus_wd)
		bus_width = host->pdata->get_bus_wd(slot->id);
	else if (host->dev->of_node)
		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
	else
		bus_width = 1;

	if (drv_data && drv_data->setup_bus) {
		struct device_node *slot_np;

		slot_np = dw_mci_of_find_slot_node(host->dev, slot->id);
		ret = drv_data->setup_bus(host, slot_np, bus_width);
		if (ret)
			goto err_setup_bus;
	}

	switch (bus_width) {
	case 8:
		mmc->caps |= MMC_CAP_8_BIT_DATA;
		/* fall through - an 8-bit host also supports 4-bit mode */
	case 4:
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
	if (IS_ERR(host->vmmc)) {
		pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
		host->vmmc = NULL;
	} else
		regulator_enable(host->vmmc);

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	mmc_add_host(mmc);

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	/*
	 * Card may have been plugged in prior to boot so we
	 * need to run the detect tasklet
	 */
	queue_work(host->card_workqueue, &host->card_work);

	return 0;

err_setup_bus:
	mmc_free_host(mmc);
	return -EINVAL;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Shutdown detect IRQ */
	if (slot->host->pdata->exit)
		slot->host->pdata->exit(id);

	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
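
/*
 * Set up DMA support.  A page of coherent memory is reserved for the
 * scatter/gather translation buffer (the IDMAC descriptor ring); if no
 * usable dma_ops end up configured, the driver falls back to PIO mode.
 */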
static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					   &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
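
/*
 * Assert the controller, FIFO and DMA resets together and poll CTRL for up
 * to 500ms until the hardware clears all three self-clearing bits.
 */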
static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int ctrl;

	mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
				SDMMC_CTRL_DMA_RESET));

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
			      SDMMC_CTRL_DMA_RESET)))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);

	return false;
}
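
/* Map boolean device-tree properties onto driver quirk bits (OF builds only). */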
#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "supports-highspeed",
		.id	= DW_MCI_QUIRK_HIGHSPEED,
	}, {
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};
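
/*
 * Example (hypothetical node name and unit address) of the properties that
 * dw_mci_parse_dt() and the slot helpers above consume:
 *
 *	mshc0: mshc@12200000 {
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		supports-highspeed;
 *		broken-cd;
 *		keep-power-in-suspend;
 *		enable-sdio-wakeup;
 *
 *		slot@0 {
 *			reg = <0>;
 *			bus-width = <4>;
 *		};
 *	};
 */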
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "keep-power-in-suspend", NULL))
		pdata->pm_caps |= MMC_PM_KEEP_POWER;

	if (of_find_property(np, "enable-sdio-wakeup", NULL))
		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	return pdata;
}
#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
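
/*
 * Controller probe: resolve platform data (from the caller or from the
 * device tree), enable the bus and card interface clocks, size the host
 * data width from HCON, reset the block, set up DMA, program the FIFO
 * watermarks, request the IRQ and register every slot.  At least one slot
 * must initialize successfully for the probe to succeed.
 */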
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
		dev_err(host->dev,
			"Platform data must supply select_slot function\n");
		return -ENODEV;
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}
	}

	if (IS_ERR(host->ciu_clk))
		host->bus_hz = host->pdata->bus_hz;
	else
		host->bus_hz = clk_get_rate(host->ciu_clk);

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks; bail out through the clock cleanup on failure */
	if (!mci_wait_reset(host->dev, host)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2
	 *                          DMA Size = 8
	 */
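
	/*
	 * Worked example, assuming a 32-word FIFO: MSIZE = 8 (0x2 << 28),
	 * RX_WMark = 32/2 - 1 = 15, TX_WMark = 32/2 = 16, giving a FIFOTH
	 * value of 0x200f0010.
	 */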
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
			((fifo_size/2) << 0));
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	host->card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
	if (!host->card_workqueue) {
		ret = -ENOMEM;
		goto err_dmaunmap;
	}
	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_workqueue;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;

	/*
	 * Enable interrupts for command done, data over, data empty, card det,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

	dev_info(host->dev, "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev, "attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		goto err_workqueue;
	}

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;

err_workqueue:
	destroy_workqueue(host->card_workqueue);

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);
	if (host->vmmc)
		regulator_disable(host->vmmc);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
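
/*
 * Tear down the controller: mask and clear all interrupts, remove every
 * slot, gate the card interface clock, and release the DMA, regulator and
 * clock resources acquired during probe.
 */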
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc)
		regulator_disable(host->vmmc);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);

#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
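
/*
 * Suspend each slot in turn; if one refuses to suspend, resume the slots
 * already suspended and propagate the error so the system suspend is
 * aborted.
 */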
int dw_mci_suspend(struct dw_mci *host)
{
	int i, ret = 0;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		ret = mmc_suspend_host(slot->mmc);
		if (ret < 0) {
			while (--i >= 0) {
				slot = host->slot[i];
				if (slot)
					mmc_resume_host(slot->mmc);
			}
			return ret;
		}
	}

	if (host->vmmc)
		regulator_disable(host->vmmc);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
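
/*
 * Resume: re-enable the regulator, reset the block and reprogram FIFOTH and
 * the interrupt masks before resuming each slot.  Slots that kept power
 * (MMC_PM_KEEP_POWER) also get their ios and bus settings restored.
 */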
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc)
		regulator_enable(host->vmmc);

	if (!mci_wait_reset(host->dev, host))
		return -ENODEV;

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/* Restore the old value at FIFOTH register */
	mci_writel(host, FIFOTH, host->fifoth_val);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
		ret = mmc_resume_host(slot->mmc);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");