/*
 * linux/drivers/mmc/host/omap.c
 *
 * Copyright (C) 2004 Nokia Corporation
 * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
 * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
 * Other hacks (DMA, SD, etc) by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/i2c/tps65010.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>

#include <plat/board.h>
#include <plat/mmc.h>
#include <asm/gpio.h>
#include <plat/dma.h>
#include <plat/mux.h>
#include <plat/fpga.h>
#define OMAP_MMC_REG_CMD	0x00
#define OMAP_MMC_REG_ARGL	0x01
#define OMAP_MMC_REG_ARGH	0x02
#define OMAP_MMC_REG_CON	0x03
#define OMAP_MMC_REG_STAT	0x04
#define OMAP_MMC_REG_IE		0x05
#define OMAP_MMC_REG_CTO	0x06
#define OMAP_MMC_REG_DTO	0x07
#define OMAP_MMC_REG_DATA	0x08
#define OMAP_MMC_REG_BLEN	0x09
#define OMAP_MMC_REG_NBLK	0x0a
#define OMAP_MMC_REG_BUF	0x0b
#define OMAP_MMC_REG_SDIO	0x0d
#define OMAP_MMC_REG_REV	0x0f
#define OMAP_MMC_REG_RSP0	0x10
#define OMAP_MMC_REG_RSP1	0x11
#define OMAP_MMC_REG_RSP2	0x12
#define OMAP_MMC_REG_RSP3	0x13
#define OMAP_MMC_REG_RSP4	0x14
#define OMAP_MMC_REG_RSP5	0x15
#define OMAP_MMC_REG_RSP6	0x16
#define OMAP_MMC_REG_RSP7	0x17
#define OMAP_MMC_REG_IOSR	0x18
#define OMAP_MMC_REG_SYSC	0x19
#define OMAP_MMC_REG_SYSS	0x1a

#define OMAP_MMC_STAT_CARD_ERR		(1 << 14)
#define OMAP_MMC_STAT_CARD_IRQ		(1 << 13)
#define OMAP_MMC_STAT_OCR_BUSY		(1 << 12)
#define OMAP_MMC_STAT_A_EMPTY		(1 << 11)
#define OMAP_MMC_STAT_A_FULL		(1 << 10)
#define OMAP_MMC_STAT_CMD_CRC		(1 <<  8)
#define OMAP_MMC_STAT_CMD_TOUT		(1 <<  7)
#define OMAP_MMC_STAT_DATA_CRC		(1 <<  6)
#define OMAP_MMC_STAT_DATA_TOUT		(1 <<  5)
#define OMAP_MMC_STAT_END_BUSY		(1 <<  4)
#define OMAP_MMC_STAT_END_OF_DATA	(1 <<  3)
#define OMAP_MMC_STAT_CARD_BUSY		(1 <<  2)
#define OMAP_MMC_STAT_END_OF_CMD	(1 <<  0)

#define OMAP_MMC_REG(host, reg)		(OMAP_MMC_REG_##reg << (host)->reg_shift)
#define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
#define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
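
/*
 * Note on register addressing (derived from the macros above): the
 * OMAP_MMC_REG_* values are register indices, not byte offsets.
 * OMAP_MMC_REG() turns an index into an offset using reg_shift, which
 * probe sets to 1 on OMAP7xx and 2 on other chips.  Illustrative example,
 * assuming reg_shift == 2:
 *
 *	OMAP_MMC_READ(host, STAT);	// reads virt_base + (0x04 << 2) = +0x10
 */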
/*
 * Command types
 */
#define OMAP_MMC_CMDTYPE_BC	0
#define OMAP_MMC_CMDTYPE_BCR	1
#define OMAP_MMC_CMDTYPE_AC	2
#define OMAP_MMC_CMDTYPE_ADTC	3

#define DRIVER_NAME "mmci-omap"

/* Specifies how often in millisecs to poll for card status changes
 * when the cover switch is open */
#define OMAP_MMC_COVER_POLL_DELAY	500

struct mmc_omap_host;

#define USE_DMA_PRIVATE

struct mmc_omap_slot {
	int			id;
	unsigned int		vdd;
	u16			saved_con;
	u16			bus_mode;
	unsigned int		fclk_freq;
	unsigned		powered:1;

	struct tasklet_struct	cover_tasklet;
	struct timer_list	cover_timer;
	unsigned		cover_open;

	struct mmc_request	*mrq;
	struct mmc_omap_host	*host;
	struct mmc_host		*mmc;
	struct omap_mmc_slot_data *pdata;
};

struct mmc_omap_host {
	int			initialized;
	int			suspended;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	struct mmc_host		*mmc;
	struct device		*dev;
	unsigned char		id;	/* 16xx chips have 2 MMC blocks */
	struct clk		*iclk;
	struct clk		*fclk;
	struct dma_chan		*dma_rx;
	u32			dma_rx_burst;
	struct dma_chan		*dma_tx;
	u32			dma_tx_burst;
	struct resource		*mem_res;
	void __iomem		*virt_base;
	unsigned int		phys_base;
	int			irq;
	unsigned char		bus_mode;
	unsigned char		hw_bus_mode;
	unsigned int		reg_shift;

	struct work_struct	cmd_abort_work;
	unsigned		abort:1;
	struct timer_list	cmd_abort_timer;

	struct work_struct	slot_release_work;
	struct mmc_omap_slot	*next_slot;
	struct work_struct	send_stop_work;
	struct mmc_data		*stop_data;

	unsigned int		sg_len;
	int			sg_idx;
	u16			*buffer;
	u32			buffer_bytes_left;
	u32			total_bytes_left;

	unsigned		use_dma:1;
	unsigned		brs_received:1, dma_done:1;
	unsigned		dma_in_use:1;
#ifdef USE_DMA_PRIVATE
	unsigned		dma_is_read:1;
	int			dma_ch;
	struct timer_list	dma_timer;
	unsigned		dma_len;
#endif
	spinlock_t		dma_lock;

	struct mmc_omap_slot	*slots[OMAP_MMC_MAX_SLOTS];
	struct mmc_omap_slot	*current_slot;
	spinlock_t		slot_lock;
	wait_queue_head_t	slot_wq;
	int			nr_slots;

	struct timer_list	clk_timer;
	spinlock_t		clk_lock;	/* for changing enabled state */
	unsigned int		fclk_enabled:1;
	struct workqueue_struct *mmc_omap_wq;

	struct omap_mmc_platform_data *pdata;
};

static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
{
	unsigned long tick_ns;

	if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
		tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq;
		ndelay(8 * tick_ns);
	}
}
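
/*
 * Worked example (illustrative): with fclk_freq == 24000000, tick_ns is
 * (1000000000 + 24000000 - 1) / 24000000 = 42 ns (rounded up), so the
 * ndelay() above waits 8 * 42 = 336 ns, i.e. at least eight functional
 * clock cycles before the clock may be gated off.
 */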
static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->fclk_enabled != enable) {
		host->fclk_enabled = enable;
		if (enable)
			clk_enable(host->fclk);
		else
			clk_disable(host->fclk);
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
{
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;

	if (claimed)
		goto no_claim;
	spin_lock_irqsave(&host->slot_lock, flags);
	while (host->mmc != NULL) {
		spin_unlock_irqrestore(&host->slot_lock, flags);
		wait_event(host->slot_wq, host->mmc == NULL);
		spin_lock_irqsave(&host->slot_lock, flags);
	}
	host->mmc = slot->mmc;
	spin_unlock_irqrestore(&host->slot_lock, flags);
no_claim:
	del_timer(&host->clk_timer);
	if (host->current_slot != slot || !claimed)
		mmc_omap_fclk_offdelay(host->current_slot);

	if (host->current_slot != slot) {
		OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
		if (host->pdata->switch_slot != NULL)
			host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
		host->current_slot = slot;
	}

	if (claimed) {
		mmc_omap_fclk_enable(host, 1);

		/* Doing the dummy read here seems to work around some bug
		 * at least in OMAP24xx silicon where the command would not
		 * start after writing the CMD register. Sigh. */
		OMAP_MMC_READ(host, CON);

		OMAP_MMC_WRITE(host, CON, slot->saved_con);
	} else
		mmc_omap_fclk_enable(host, 0);
}

static void mmc_omap_start_request(struct mmc_omap_host *host,
				   struct mmc_request *req);

static void mmc_omap_slot_release_work(struct work_struct *work)
{
	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
						  slot_release_work);
	struct mmc_omap_slot *next_slot = host->next_slot;
	struct mmc_request *rq;

	host->next_slot = NULL;
	mmc_omap_select_slot(next_slot, 1);
	rq = next_slot->mrq;
	next_slot->mrq = NULL;
	mmc_omap_start_request(host, rq);
}

static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
{
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;
	int i;

	BUG_ON(slot == NULL || host->mmc == NULL);

	if (clk_enabled)
		/* Keeps clock running for at least 8 cycles on valid freq */
		mod_timer(&host->clk_timer, jiffies + HZ/10);
	else {
		del_timer(&host->clk_timer);
		mmc_omap_fclk_offdelay(slot);
		mmc_omap_fclk_enable(host, 0);
	}

	spin_lock_irqsave(&host->slot_lock, flags);
	/* Check for any pending requests */
	for (i = 0; i < host->nr_slots; i++) {
		struct mmc_omap_slot *new_slot;

		if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
			continue;

		BUG_ON(host->next_slot != NULL);
		new_slot = host->slots[i];
		/* The current slot should not have a request in queue */
		BUG_ON(new_slot == host->current_slot);

		host->next_slot = new_slot;
		host->mmc = new_slot->mmc;
		spin_unlock_irqrestore(&host->slot_lock, flags);
		queue_work(host->mmc_omap_wq, &host->slot_release_work);
		return;
	}

	host->mmc = NULL;
	wake_up(&host->slot_wq);
	spin_unlock_irqrestore(&host->slot_lock, flags);
}

static inline
int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
{
	if (slot->pdata->get_cover_state)
		return slot->pdata->get_cover_state(mmc_dev(slot->mmc),
						    slot->id);
	return 0;
}

static ssize_t
mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct mmc_omap_slot *slot = mmc_priv(mmc);

	return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? "open" :
		       "closed");
}

static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);

static ssize_t
mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct mmc_omap_slot *slot = mmc_priv(mmc);

	return sprintf(buf, "%s\n", slot->pdata->name);
}

static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);

static void
mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	u32 cmdreg;
	u32 resptype;
	u32 cmdtype;

	host->cmd = cmd;

	resptype = 0;
	cmdtype = 0;

	/* Our hardware needs to know exact type */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
		/* resp 1, 1b, 6, 7 */
		resptype = 1;
		break;
	case MMC_RSP_R2:
		resptype = 2;
		break;
	case MMC_RSP_R3:
		resptype = 3;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
		break;
	}

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
		cmdtype = OMAP_MMC_CMDTYPE_BC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
		cmdtype = OMAP_MMC_CMDTYPE_BCR;
	} else {
		cmdtype = OMAP_MMC_CMDTYPE_AC;
	}

	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

	if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdreg |= 1 << 6;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= 1 << 11;

	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
		cmdreg |= 1 << 15;

	mod_timer(&host->cmd_abort_timer, jiffies + HZ/2);

	OMAP_MMC_WRITE(host, CTO, 200);
	OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
	OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
	OMAP_MMC_WRITE(host, IE,
		       OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    |
		       OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  |
		       OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT |
		       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  |
		       OMAP_MMC_STAT_END_OF_DATA);
	OMAP_MMC_WRITE(host, CMD, cmdreg);
}
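
/*
 * CMD register layout as used above (derived from the code, not a full
 * datasheet description): bits 5:0 hold the opcode, bits 9:8 the response
 * type, bits 13:12 the command type; bit 6 selects open-drain mode,
 * bit 11 marks a busy (R1b-style) response, and bit 15 sets the data
 * direction for reads.
 */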
static void
mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
		     int abort)
{
	enum dma_data_direction dma_data_dir;
	struct device *dev = mmc_dev(host->mmc);
	struct dma_chan *c;

#ifdef USE_DMA_PRIVATE
	BUG_ON(host->dma_ch < 0);
	if (data->error)
		omap_stop_dma(host->dma_ch);
	/* Release DMA channel lazily */
	mod_timer(&host->dma_timer, jiffies + HZ);
#endif
	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		c = host->dma_tx;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		c = host->dma_rx;
	}
	if (c) {
		if (data->error) {
			dmaengine_terminate_all(c);
			/* Claim nothing transferred on error... */
			data->bytes_xfered = 0;
		}
		dev = c->device->dev;
	}
	dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
}

static void mmc_omap_send_stop_work(struct work_struct *work)
{
	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
						  send_stop_work);
	struct mmc_omap_slot *slot = host->current_slot;
	struct mmc_data *data = host->stop_data;
	unsigned long tick_ns;

	tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq;
	ndelay(8*tick_ns);

	mmc_omap_start_command(host, data->stop);
}

static void
mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use)
		mmc_omap_release_dma(host, data, data->error);

	host->data = NULL;
	host->sg_len = 0;

	/* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
	 * dozens of requests until the card finishes writing data.
	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
	 */

	if (!data->stop) {
		struct mmc_host *mmc;

		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, data->mrq);
		return;
	}

	host->stop_data = data;
	queue_work(host->mmc_omap_wq, &host->send_stop_work);
}

static void
mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
{
	struct mmc_omap_slot *slot = host->current_slot;
	unsigned int restarts, passes, timeout;
	u16 stat = 0;

	/* Sending abort takes 80 clocks. Have some extra and round up */
	timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq;
	restarts = 0;
	while (restarts < maxloops) {
		OMAP_MMC_WRITE(host, STAT, 0xFFFF);
		OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));

		passes = 0;
		while (passes < timeout) {
			stat = OMAP_MMC_READ(host, STAT);
			if (stat & OMAP_MMC_STAT_END_OF_CMD)
				goto out;

			udelay(1);
			passes++;
		}

		restarts++;
	}
out:
	OMAP_MMC_WRITE(host, STAT, stat);
}
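
/*
 * Worked example (illustrative): the timeout above converts "at least 120
 * clock cycles" into microseconds of polling.  At fclk_freq == 24000000,
 * (120 * 1000000 + 24000000 - 1) / 24000000 = 5, so each restart polls
 * STAT for up to 5 us before re-issuing the abort command.
 */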
static void
mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use)
		mmc_omap_release_dma(host, data, 1);

	host->data = NULL;
	host->sg_len = 0;

	mmc_omap_send_abort(host, 10000);
}

static void
mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
{
	unsigned long flags;
	int done;

	if (!host->dma_in_use) {
		mmc_omap_xfer_done(host, data);
		return;
	}
	done = 0;
	spin_lock_irqsave(&host->dma_lock, flags);
	if (host->dma_done)
		done = 1;
	else
		host->brs_received = 1;
	spin_unlock_irqrestore(&host->dma_lock, flags);
	if (done)
		mmc_omap_xfer_done(host, data);
}

#ifdef USE_DMA_PRIVATE
static void
mmc_omap_dma_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	BUG_ON(host->dma_ch < 0);
	omap_free_dma(host->dma_ch);
	host->dma_ch = -1;
}
#endif

static void
mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	unsigned long flags;
	int done;

	done = 0;
	spin_lock_irqsave(&host->dma_lock, flags);
	if (host->brs_received)
		done = 1;
	else
		host->dma_done = 1;
	spin_unlock_irqrestore(&host->dma_lock, flags);
	if (done)
		mmc_omap_xfer_done(host, data);
}

static void
mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	host->cmd = NULL;

	del_timer(&host->cmd_abort_timer);

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] =
				OMAP_MMC_READ(host, RSP0) |
				(OMAP_MMC_READ(host, RSP1) << 16);
			cmd->resp[2] =
				OMAP_MMC_READ(host, RSP2) |
				(OMAP_MMC_READ(host, RSP3) << 16);
			cmd->resp[1] =
				OMAP_MMC_READ(host, RSP4) |
				(OMAP_MMC_READ(host, RSP5) << 16);
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		}
	}

	if (host->data == NULL || cmd->error) {
		struct mmc_host *mmc;

		if (host->data != NULL)
			mmc_omap_abort_xfer(host, host->data);
		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, cmd->mrq);
	}
}
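
/*
 * Note (derived from the reads above): the response appears in eight
 * 16-bit registers, RSP0..RSP7, least-significant word first.  A 136-bit
 * R2 response fills resp[3]..resp[0] from RSP0/1 through RSP6/7, while
 * short 48-bit responses use only RSP6/7.
 */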
/*
 * Abort stuck command. Can occur when card is removed while it is being
 * read.
 */
static void mmc_omap_abort_command(struct work_struct *work)
{
	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
						  cmd_abort_work);
	BUG_ON(!host->cmd);

	dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
		host->cmd->opcode);

	if (host->cmd->error == 0)
		host->cmd->error = -ETIMEDOUT;

	if (host->data == NULL) {
		struct mmc_command *cmd;
		struct mmc_host *mmc;

		cmd = host->cmd;
		host->cmd = NULL;
		mmc_omap_send_abort(host, 10000);

		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, cmd->mrq);
	} else
		mmc_omap_cmd_done(host, host->cmd);

	host->abort = 0;
	enable_irq(host->irq);
}

static void
mmc_omap_cmd_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
	unsigned long flags;

	spin_lock_irqsave(&host->slot_lock, flags);
	if (host->cmd != NULL && !host->abort) {
		OMAP_MMC_WRITE(host, IE, 0);
		disable_irq(host->irq);
		host->abort = 1;
		queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
	}
	spin_unlock_irqrestore(&host->slot_lock, flags);
}

/* PIO only */
static void
mmc_omap_sg_to_buf(struct mmc_omap_host *host)
{
	struct scatterlist *sg;

	sg = host->data->sg + host->sg_idx;
	host->buffer_bytes_left = sg->length;
	host->buffer = sg_virt(sg);
	if (host->buffer_bytes_left > host->total_bytes_left)
		host->buffer_bytes_left = host->total_bytes_left;
}

static void
mmc_omap_clk_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	mmc_omap_fclk_enable(host, 0);
}

/* PIO only */
static void
mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
{
	int n;

	if (host->buffer_bytes_left == 0) {
		host->sg_idx++;
		BUG_ON(host->sg_idx == host->sg_len);
		mmc_omap_sg_to_buf(host);
	}
	n = 64;
	if (n > host->buffer_bytes_left)
		n = host->buffer_bytes_left;
	host->buffer_bytes_left -= n;
	host->total_bytes_left -= n;
	host->data->bytes_xfered += n;

	if (write) {
		__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
	} else {
		__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
	}
}
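
/*
 * Note (illustrative assumption): the 64-byte chunk moved per interrupt
 * matches the PIO FIFO thresholds programmed in mmc_omap_prepare_data(),
 * which writes 0x1f1f to BUF; a threshold field of 0x1f is taken to mean
 * 32 16-bit words, i.e. 64 bytes per almost-full/almost-empty event.
 */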
static inline void mmc_omap_report_irq(u16 status)
{
	static const char *mmc_omap_status_bits[] = {
		"EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
		"CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
	};
	int i, c = 0;

	for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
		if (status & (1 << i)) {
			if (c)
				printk(" ");
			printk("%s", mmc_omap_status_bits[i]);
			c++;
		}
}

static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *)dev_id;
	u16 status;
	int end_command;
	int end_transfer;
	int transfer_error, cmd_error;

	if (host->cmd == NULL && host->data == NULL) {
		status = OMAP_MMC_READ(host, STAT);
		dev_info(mmc_dev(host->slots[0]->mmc),
			 "Spurious IRQ 0x%04x\n", status);
		if (status != 0) {
			OMAP_MMC_WRITE(host, STAT, status);
			OMAP_MMC_WRITE(host, IE, 0);
		}
		return IRQ_HANDLED;
	}

	end_command = 0;
	end_transfer = 0;
	transfer_error = 0;
	cmd_error = 0;

	while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
		int cmd;

		OMAP_MMC_WRITE(host, STAT, status);
		if (host->cmd != NULL)
			cmd = host->cmd->opcode;
		else
			cmd = -1;
#ifdef CONFIG_MMC_DEBUG
		dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
			status, cmd);
		mmc_omap_report_irq(status);
		printk("\n");
#endif
		if (host->total_bytes_left) {
			if ((status & OMAP_MMC_STAT_A_FULL) ||
			    (status & OMAP_MMC_STAT_END_OF_DATA))
				mmc_omap_xfer_data(host, 0);
			if (status & OMAP_MMC_STAT_A_EMPTY)
				mmc_omap_xfer_data(host, 1);
		}

		if (status & OMAP_MMC_STAT_END_OF_DATA)
			end_transfer = 1;

		if (status & OMAP_MMC_STAT_DATA_TOUT) {
			dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
				cmd);
			if (host->data) {
				host->data->error = -ETIMEDOUT;
				transfer_error = 1;
			}
		}

		if (status & OMAP_MMC_STAT_DATA_CRC) {
			if (host->data) {
				host->data->error = -EILSEQ;
				dev_dbg(mmc_dev(host->mmc),
					"data CRC error, bytes left %d\n",
					host->total_bytes_left);
				transfer_error = 1;
			} else {
				dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
			}
		}

		if (status & OMAP_MMC_STAT_CMD_TOUT) {
			/* Timeouts are routine with some commands */
			if (host->cmd) {
				struct mmc_omap_slot *slot =
					host->current_slot;
				if (slot == NULL ||
				    !mmc_omap_cover_is_open(slot))
					dev_err(mmc_dev(host->mmc),
						"command timeout (CMD%d)\n",
						cmd);
				host->cmd->error = -ETIMEDOUT;
				end_command = 1;
				cmd_error = 1;
			}
		}

		if (status & OMAP_MMC_STAT_CMD_CRC) {
			if (host->cmd) {
				dev_err(mmc_dev(host->mmc),
					"command CRC error (CMD%d, arg 0x%08x)\n",
					cmd, host->cmd->arg);
				host->cmd->error = -EILSEQ;
				end_command = 1;
				cmd_error = 1;
			} else
				dev_err(mmc_dev(host->mmc),
					"command CRC error without cmd?\n");
		}

		if (status & OMAP_MMC_STAT_CARD_ERR) {
			dev_dbg(mmc_dev(host->mmc),
				"ignoring card status error (CMD%d)\n",
				cmd);
			end_command = 1;
		}

		/*
		 * NOTE: On 1610 the END_OF_CMD may come too early when
		 * starting a write
		 */
		if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
		    (!(status & OMAP_MMC_STAT_A_EMPTY))) {
			end_command = 1;
		}
	}

	if (cmd_error && host->data) {
		del_timer(&host->cmd_abort_timer);
		host->abort = 1;
		OMAP_MMC_WRITE(host, IE, 0);
		disable_irq_nosync(host->irq);
		queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
		return IRQ_HANDLED;
	}

	if (end_command && host->cmd)
		mmc_omap_cmd_done(host, host->cmd);
	if (host->data != NULL) {
		if (transfer_error)
			mmc_omap_xfer_done(host, host->data);
		else if (end_transfer)
			mmc_omap_end_of_data(host, host->data);
	}

	return IRQ_HANDLED;
}

void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
{
	int cover_open;
	struct mmc_omap_host *host = dev_get_drvdata(dev);
	struct mmc_omap_slot *slot = host->slots[num];

	BUG_ON(num >= host->nr_slots);

	/* Other subsystems can call in here before we're initialised. */
	if (host->nr_slots == 0 || !host->slots[num])
		return;

	cover_open = mmc_omap_cover_is_open(slot);
	if (cover_open != slot->cover_open) {
		slot->cover_open = cover_open;
		sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
	}

	tasklet_hi_schedule(&slot->cover_tasklet);
}

static void mmc_omap_cover_timer(unsigned long arg)
{
	struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
	tasklet_schedule(&slot->cover_tasklet);
}

static void mmc_omap_cover_handler(unsigned long param)
{
	struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param;
	int cover_open = mmc_omap_cover_is_open(slot);

	mmc_detect_change(slot->mmc, 0);
	if (!cover_open)
		return;

	/*
	 * If no card is inserted, we postpone polling until
	 * the cover has been closed.
	 */
	if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
		return;

	mod_timer(&slot->cover_timer,
		  jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
}

static void mmc_omap_dma_callback(void *priv)
{
	struct mmc_omap_host *host = priv;
	struct mmc_data *data = host->data;

	/* If we got to the end of DMA, assume everything went well */
	data->bytes_xfered += data->blocks * data->blksz;

	mmc_omap_dma_done(host, data);
}

#ifdef USE_DMA_PRIVATE
/* Prepare to transfer the next segment of a scatterlist */
static void
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
	int dma_ch = host->dma_ch;
	unsigned long data_addr;
	u16 buf, frame;
	u32 count;
	struct scatterlist *sg = &data->sg[host->sg_idx];
	int src_port = 0;
	int dst_port = 0;
	int sync_dev = 0;

	data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
	frame = data->blksz;
	count = sg_dma_len(sg);

	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	host->dma_len = count;

	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
	 * Use 16 or 32 word frames when the blocksize is at least that large.
	 * Blocksize is usually 512 bytes; but not for some SD reads.
	 */
	if (cpu_is_omap15xx() && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;
	count /= frame;
	frame >>= 1;

	if (!(data->flags & MMC_DATA_WRITE)) {
		buf = 0x800f | ((frame - 1) << 8);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_TIPB;
			dst_port = OMAP_DMA_PORT_EMIFF;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_RX;

		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_CONSTANT,
					data_addr, 0, 0);
		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_POST_INC,
					 sg_dma_address(sg), 0, 0);
		omap_set_dma_dest_data_pack(dma_ch, 1);
		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	} else {
		buf = 0x0f80 | ((frame - 1) << 0);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_EMIFF;
			dst_port = OMAP_DMA_PORT_TIPB;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_TX;

		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_CONSTANT,
					 data_addr, 0, 0);
		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_POST_INC,
					sg_dma_address(sg), 0, 0);
		omap_set_dma_src_data_pack(dma_ch, 1);
		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	}

	/* Max limit for DMA frame count is 0xffff */
	BUG_ON(count > 0xffff);

	OMAP_MMC_WRITE(host, BUF, buf);
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
				     frame, count, OMAP_DMA_SYNC_FRAME,
				     sync_dev, 0);
}
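
/*
 * Worked example (illustrative): for 512-byte blocks on OMAP16xx, frame
 * is clamped to 64 bytes and then halved to 32 16-bit words, and count
 * becomes sg_dma_len(sg) / 64.  A 4096-byte segment is thus programmed
 * as 64 frame-synchronized transfers of 32 words each.
 */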
/* A scatterlist segment completed */
static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
	struct mmc_data *mmcdat = host->data;

	if (unlikely(host->dma_ch < 0)) {
		dev_err(mmc_dev(host->mmc),
			"DMA callback while DMA not enabled\n");
		return;
	}
	/* FIXME: We really should do something to _handle_ the errors */
	if (ch_status & OMAP1_DMA_TOUT_IRQ) {
		dev_err(mmc_dev(host->mmc), "DMA timeout\n");
		return;
	}
	if (ch_status & OMAP_DMA_DROP_IRQ) {
		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
		return;
	}
	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
		return;
	}
	mmcdat->bytes_xfered += host->dma_len;
	host->sg_idx++;
	if (host->sg_idx < host->sg_len) {
		mmc_omap_prepare_dma(host, host->data);
		omap_start_dma(host->dma_ch);
	} else
		mmc_omap_dma_done(host, host->data);
}

static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
{
	const char *dma_dev_name;
	int sync_dev, dma_ch, is_read, r;

	is_read = !(data->flags & MMC_DATA_WRITE);
	del_timer_sync(&host->dma_timer);
	if (host->dma_ch >= 0) {
		if (is_read == host->dma_is_read)
			return 0;
		omap_free_dma(host->dma_ch);
		host->dma_ch = -1;
	}

	if (is_read) {
		if (host->id == 0) {
			sync_dev = OMAP_DMA_MMC_RX;
			dma_dev_name = "MMC1 read";
		} else {
			sync_dev = OMAP_DMA_MMC2_RX;
			dma_dev_name = "MMC2 read";
		}
	} else {
		if (host->id == 0) {
			sync_dev = OMAP_DMA_MMC_TX;
			dma_dev_name = "MMC1 write";
		} else {
			sync_dev = OMAP_DMA_MMC2_TX;
			dma_dev_name = "MMC2 write";
		}
	}
	r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
			     host, &dma_ch);
	if (r != 0) {
		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
		return r;
	}
	host->dma_ch = dma_ch;
	host->dma_is_read = is_read;

	return 0;
}
#endif

static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
{
	u16 reg;

	reg = OMAP_MMC_READ(host, SDIO);
	reg &= ~(1 << 5);
	OMAP_MMC_WRITE(host, SDIO, reg);
	/* Set maximum timeout */
	OMAP_MMC_WRITE(host, CTO, 0xff);
}

static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
{
	unsigned int timeout, cycle_ns;
	u16 reg;

	cycle_ns = 1000000000 / host->current_slot->fclk_freq;
	timeout = req->data->timeout_ns / cycle_ns;
	timeout += req->data->timeout_clks;

	/* Check if we need to use timeout multiplier register */
	reg = OMAP_MMC_READ(host, SDIO);
	if (timeout > 0xffff) {
		reg |= (1 << 5);
		timeout /= 1024;
	} else
		reg &= ~(1 << 5);
	OMAP_MMC_WRITE(host, SDIO, reg);
	OMAP_MMC_WRITE(host, DTO, timeout);
}
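
/*
 * Worked example (illustrative): at fclk_freq == 24000000, cycle_ns is
 * 1000000000 / 24000000 = 41 ns, so timeout_ns == 100 ms yields roughly
 * 2.4 million cycles.  That exceeds the 16-bit DTO field, so bit 5 of
 * SDIO enables the 1024x multiplier and DTO is programmed with about
 * 2382 instead.
 */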
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		OMAP_MMC_WRITE(host, BLEN, 0);
		OMAP_MMC_WRITE(host, NBLK, 0);
		OMAP_MMC_WRITE(host, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}

	block_size = data->blksz;

	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		enum dma_data_direction dma_data_dir;
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *c;
		u32 burst, *bp;
		u16 buf;

		/*
		 * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
		 * and 24xx. Use 16 or 32 word frames when the
		 * blocksize is at least that large. Blocksize is
		 * usually 512 bytes; but not for some SD reads.
		 */
		burst = cpu_is_omap15xx() ? 32 : 64;
		if (burst > data->blksz)
			burst = data->blksz;

		burst >>= 1;

		if (data->flags & MMC_DATA_WRITE) {
			c = host->dma_tx;
			bp = &host->dma_tx_burst;
			buf = 0x0f80 | (burst - 1) << 0;
			dma_data_dir = DMA_TO_DEVICE;
		} else {
			c = host->dma_rx;
			bp = &host->dma_rx_burst;
			buf = 0x800f | (burst - 1) << 8;
			dma_data_dir = DMA_FROM_DEVICE;
		}

		if (!c)
			goto use_pio;

		/* Only reconfigure if we have a different burst size */
		if (*bp != burst) {
			struct dma_slave_config cfg;

			cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
			cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
			cfg.src_maxburst = burst;
			cfg.dst_maxburst = burst;

			if (dmaengine_slave_config(c, &cfg))
				goto use_pio;

			*bp = burst;
		}

		host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
					  dma_data_dir);
		if (host->sg_len == 0)
			goto use_pio;

		tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
			data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			goto use_pio;

		OMAP_MMC_WRITE(host, BUF, buf);

		tx->callback = mmc_omap_dma_callback;
		tx->callback_param = host;
		dmaengine_submit(tx);
		host->brs_received = 0;
		host->dma_done = 0;
		host->dma_in_use = 1;
		return;
	}
use_pio:
#ifdef USE_DMA_PRIVATE
	if (use_dma) {
		if (mmc_omap_get_dma_channel(host, data) == 0) {
			enum dma_data_direction dma_data_dir;

			if (data->flags & MMC_DATA_WRITE)
				dma_data_dir = DMA_TO_DEVICE;
			else
				dma_data_dir = DMA_FROM_DEVICE;

			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
						  sg_len, dma_data_dir);
			host->total_bytes_left = 0;
			mmc_omap_prepare_dma(host, req->data);
			host->brs_received = 0;
			host->dma_done = 0;
			host->dma_in_use = 1;
		} else
			use_dma = 0;
	}
#else
	use_dma = 0;
#endif

	/* Revert to PIO? */
	if (!use_dma) {
		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
		host->total_bytes_left = data->blocks * block_size;
		host->sg_len = sg_len;
		mmc_omap_sg_to_buf(host);
		host->dma_in_use = 0;
	}
}

static void mmc_omap_start_request(struct mmc_omap_host *host,
				   struct mmc_request *req)
{
	BUG_ON(host->mrq != NULL);

	host->mrq = req;

	/* only touch fifo AFTER the controller readies it */
	mmc_omap_prepare_data(host, req);
	mmc_omap_start_command(host, req->cmd);
	if (host->dma_in_use) {
		struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
				host->dma_tx : host->dma_rx;

		if (c)
			dma_async_issue_pending(c);
#ifdef USE_DMA_PRIVATE
		else
			omap_start_dma(host->dma_ch);
#endif
	}
}
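
/*
 * Note on the dmaengine flow (as wired up above): mmc_omap_prepare_data()
 * maps the scatterlist, prepares the slave transfer and calls
 * dmaengine_submit(), but the transfer is only kicked off here with
 * dma_async_issue_pending(), after the command itself has been written.
 */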
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_omap_slot *slot = mmc_priv(mmc);
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;

	spin_lock_irqsave(&host->slot_lock, flags);
	if (host->mmc != NULL) {
		BUG_ON(slot->mrq != NULL);
		slot->mrq = req;
		spin_unlock_irqrestore(&host->slot_lock, flags);
		return;
	} else
		host->mmc = mmc;
	spin_unlock_irqrestore(&host->slot_lock, flags);
	mmc_omap_select_slot(slot, 1);
	mmc_omap_start_request(host, req);
}

static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
			       int vdd)
{
	struct mmc_omap_host *host;

	host = slot->host;

	if (slot->pdata->set_power != NULL)
		slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
				       vdd);

	if (cpu_is_omap24xx()) {
		u16 w;

		if (power_on) {
			w = OMAP_MMC_READ(host, CON);
			OMAP_MMC_WRITE(host, CON, w | (1 << 11));
		} else {
			w = OMAP_MMC_READ(host, CON);
			OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
		}
	}
}

static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_omap_slot *slot = mmc_priv(mmc);
	struct mmc_omap_host *host = slot->host;
	int func_clk_rate = clk_get_rate(host->fclk);
	int dsor;

	if (ios->clock == 0)
		return 0;

	dsor = func_clk_rate / ios->clock;
	if (dsor < 1)
		dsor = 1;

	if (func_clk_rate / dsor > ios->clock)
		dsor++;

	if (dsor > 250)
		dsor = 250;

	slot->fclk_freq = func_clk_rate / dsor;

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		dsor |= 1 << 15;

	return dsor;
}
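
/*
 * Worked example (illustrative, assuming a 48 MHz functional clock):
 * requesting ios->clock == 400 kHz gives dsor = 48000000 / 400000 = 120,
 * an exact divide, so no correction is needed.  Requesting 25 MHz gives
 * dsor = 1, which is bumped to 2 because 48 MHz / 1 would exceed the
 * requested rate; the card then runs at 24 MHz.
 */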
static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_omap_slot *slot = mmc_priv(mmc);
	struct mmc_omap_host *host = slot->host;
	int i, dsor;
	int clk_enabled;

	mmc_omap_select_slot(slot, 0);

	dsor = mmc_omap_calc_divisor(mmc, ios);

	if (ios->vdd != slot->vdd)
		slot->vdd = ios->vdd;

	clk_enabled = 0;
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		mmc_omap_set_power(slot, 0, ios->vdd);
		break;
	case MMC_POWER_UP:
		/* Cannot touch dsor yet, just power up MMC */
		mmc_omap_set_power(slot, 1, ios->vdd);
		goto exit;
	case MMC_POWER_ON:
		mmc_omap_fclk_enable(host, 1);
		clk_enabled = 1;
		dsor |= 1 << 11;
		break;
	}

	if (slot->bus_mode != ios->bus_mode) {
		if (slot->pdata->set_bus_mode != NULL)
			slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id,
						  ios->bus_mode);
		slot->bus_mode = ios->bus_mode;
	}

	/* On insanely high arm_per frequencies something sometimes
	 * goes somehow out of sync, and the POW bit is not being set,
	 * which results in the while loop below getting stuck.
	 * Writing to the CON register twice seems to do the trick. */
	for (i = 0; i < 2; i++)
		OMAP_MMC_WRITE(host, CON, dsor);
	slot->saved_con = dsor;
	if (ios->power_mode == MMC_POWER_ON) {
		/* worst case at 400kHz, 80 cycles makes 200 microsecs */
		int usecs = 250;

		/* Send clock cycles, poll completion */
		OMAP_MMC_WRITE(host, IE, 0);
		OMAP_MMC_WRITE(host, STAT, 0xffff);
		OMAP_MMC_WRITE(host, CMD, 1 << 7);
		while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) {
			udelay(1);
			usecs--;
		}
		OMAP_MMC_WRITE(host, STAT, 1);
	}

exit:
	mmc_omap_release_slot(slot, clk_enabled);
}

static const struct mmc_host_ops mmc_omap_ops = {
	.request	= mmc_omap_request,
	.set_ios	= mmc_omap_set_ios,
};

static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
{
	struct mmc_omap_slot *slot = NULL;
	struct mmc_host *mmc;
	int r;

	mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
	if (mmc == NULL)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->host = host;
	slot->mmc = mmc;
	slot->id = id;
	slot->pdata = &host->pdata->slots[id];
	host->slots[id] = slot;

	mmc->caps = 0;
	if (host->pdata->slots[id].wires >= 4)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	mmc->ops = &mmc_omap_ops;
	mmc->f_min = 400000;

	if (cpu_class_is_omap2())
		mmc->f_max = 48000000;
	else
		mmc->f_max = 24000000;
	if (host->pdata->max_freq)
		mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
	mmc->ocr_avail = slot->pdata->ocr_mask;

	/* Use scatterlist DMA to reduce per-transfer costs.
	 * NOTE max_seg_size assumption that small blocks aren't
	 * normally used (except e.g. for reading SD registers).
	 */
	mmc->max_segs = 32;
	mmc->max_blk_size = 2048;	/* BLEN is 11 bits (+1) */
	mmc->max_blk_count = 2048;	/* NBLK is 11 bits (+1) */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	r = mmc_add_host(mmc);
	if (r < 0)
		goto err_remove_host;

	if (slot->pdata->name != NULL) {
		r = device_create_file(&mmc->class_dev,
					&dev_attr_slot_name);
		if (r < 0)
			goto err_remove_host;
	}

	if (slot->pdata->get_cover_state != NULL) {
		r = device_create_file(&mmc->class_dev,
					&dev_attr_cover_switch);
		if (r < 0)
			goto err_remove_slot_name;

		setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
			    (unsigned long)slot);
		tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
			     (unsigned long)slot);
		tasklet_schedule(&slot->cover_tasklet);
	}

	return 0;

err_remove_slot_name:
	if (slot->pdata->name != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
err_remove_host:
	mmc_remove_host(mmc);
	mmc_free_host(mmc);
	return r;
}

static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;

	if (slot->pdata->name != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
	if (slot->pdata->get_cover_state != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);

	tasklet_kill(&slot->cover_tasklet);
	del_timer_sync(&slot->cover_timer);
	flush_workqueue(slot->host->mmc_omap_wq);

	mmc_remove_host(mmc);
	mmc_free_host(mmc);
}
static int __devinit mmc_omap_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_omap_host *host = NULL;
	struct resource *res;
	dma_cap_mask_t mask;
	unsigned sig;
	int i, ret = 0;
	int irq;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENXIO;
	}
	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "no slots\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	res = request_mem_region(res->start, resource_size(res),
				 pdev->name);
	if (res == NULL)
		return -EBUSY;

	host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
	if (host == NULL) {
		ret = -ENOMEM;
		goto err_free_mem_region;
	}

	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);

	INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
	setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
		    (unsigned long) host);

	spin_lock_init(&host->clk_lock);
	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);

	spin_lock_init(&host->dma_lock);
#ifdef USE_DMA_PRIVATE
	setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
#endif
	spin_lock_init(&host->slot_lock);
	init_waitqueue_head(&host->slot_wq);

	host->pdata = pdata;
	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	host->id = pdev->id;
	host->mem_res = res;
	host->irq = irq;
	host->use_dma = 1;
#ifdef USE_DMA_PRIVATE
	host->dev->dma_mask = &pdata->dma_mask;
	host->dma_ch = -1;
#endif
	host->phys_base = host->mem_res->start;
	host->virt_base = ioremap(res->start, resource_size(res));
	if (!host->virt_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	host->iclk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(host->iclk)) {
		ret = PTR_ERR(host->iclk);
		goto err_free_mmc_host;
	}
	clk_enable(host->iclk);

	host->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		goto err_free_iclk;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->dma_tx_burst = -1;
	host->dma_rx_burst = -1;

	/* A missing dmaengine channel is not fatal; the driver falls back
	 * to the private DMA API or PIO. */
	if (cpu_is_omap24xx())
		sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
	else
		sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
	host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
	if (!host->dma_tx)
		dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
			sig);

	if (cpu_is_omap24xx())
		sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
	else
		sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
	host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
	if (!host->dma_rx)
		dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
			sig);

	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_free_dma;

	if (pdata->init != NULL) {
		ret = pdata->init(&pdev->dev);
		if (ret < 0)
			goto err_free_irq;
	}

	host->nr_slots = pdata->nr_slots;
	host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);

	host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
	if (!host->mmc_omap_wq) {
		ret = -ENOMEM;
		goto err_plat_cleanup;
	}

	for (i = 0; i < pdata->nr_slots; i++) {
		ret = mmc_omap_new_slot(host, i);
		if (ret < 0) {
			while (--i >= 0)
				mmc_omap_remove_slot(host->slots[i]);

			goto err_destroy_wq;
		}
	}

	return 0;

err_destroy_wq:
	destroy_workqueue(host->mmc_omap_wq);
err_plat_cleanup:
	if (pdata->cleanup)
		pdata->cleanup(&pdev->dev);
err_free_irq:
	free_irq(host->irq, host);
err_free_dma:
	if (host->dma_tx)
		dma_release_channel(host->dma_tx);
	if (host->dma_rx)
		dma_release_channel(host->dma_rx);
	clk_put(host->fclk);
err_free_iclk:
	clk_disable(host->iclk);
	clk_put(host->iclk);
err_free_mmc_host:
	iounmap(host->virt_base);
err_ioremap:
	kfree(host);
err_free_mem_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}
static int __devexit mmc_omap_remove(struct platform_device *pdev)
{
	struct mmc_omap_host *host = platform_get_drvdata(pdev);
	int i;

	platform_set_drvdata(pdev, NULL);

	BUG_ON(host == NULL);

	for (i = 0; i < host->nr_slots; i++)
		mmc_omap_remove_slot(host->slots[i]);

	if (host->pdata->cleanup)
		host->pdata->cleanup(&pdev->dev);

	mmc_omap_fclk_enable(host, 0);
	free_irq(host->irq, host);
	clk_put(host->fclk);
	clk_disable(host->iclk);
	clk_put(host->iclk);

	if (host->dma_tx)
		dma_release_channel(host->dma_tx);
	if (host->dma_rx)
		dma_release_channel(host->dma_rx);

	iounmap(host->virt_base);
	release_mem_region(pdev->resource[0].start,
			   pdev->resource[0].end - pdev->resource[0].start + 1);
	destroy_workqueue(host->mmc_omap_wq);

	kfree(host);

	return 0;
}

#ifdef CONFIG_PM
static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	int i, ret = 0;
	struct mmc_omap_host *host = platform_get_drvdata(pdev);

	if (host == NULL || host->suspended)
		return 0;

	for (i = 0; i < host->nr_slots; i++) {
		struct mmc_omap_slot *slot;

		slot = host->slots[i];
		ret = mmc_suspend_host(slot->mmc);
		if (ret < 0) {
			while (--i >= 0) {
				slot = host->slots[i];
				mmc_resume_host(slot->mmc);
			}
			return ret;
		}
	}
	host->suspended = 1;
	return 0;
}

static int mmc_omap_resume(struct platform_device *pdev)
{
	int i, ret = 0;
	struct mmc_omap_host *host = platform_get_drvdata(pdev);

	if (host == NULL || !host->suspended)
		return 0;

	for (i = 0; i < host->nr_slots; i++) {
		struct mmc_omap_slot *slot;

		slot = host->slots[i];
		ret = mmc_resume_host(slot->mmc);
		if (ret < 0)
			return ret;

		host->suspended = 0;
	}
	return 0;
}
#else
#define mmc_omap_suspend	NULL
#define mmc_omap_resume		NULL
#endif

static struct platform_driver mmc_omap_driver = {
	.probe		= mmc_omap_probe,
	.remove		= __devexit_p(mmc_omap_remove),
	.suspend	= mmc_omap_suspend,
	.resume		= mmc_omap_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(mmc_omap_driver);

MODULE_DESCRIPTION("OMAP Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Juha Yrjölä");