/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

static unsigned int debug_quirks = 0;

/* For the case of multiple controllers on one platform */
static u16 chip_index = 0;
static spinlock_t index_lock;
/*
 * Different quirks to handle when the hardware deviates from a strict
 * interpretation of the SDHCI specification.
 */

/* Controller doesn't honor resets unless we touch the clock register */
#define SDHCI_QUIRK_CLOCK_BEFORE_RESET			(1<<0)
/* Controller has bad caps bits, but really supports DMA */
#define SDHCI_QUIRK_FORCE_DMA				(1<<1)
/* Controller doesn't like some resets when there is no card inserted. */
#define SDHCI_QUIRK_NO_CARD_NO_RESET			(1<<2)
/* Controller doesn't like clearing the power reg before a change */
#define SDHCI_QUIRK_SINGLE_POWER_WRITE			(1<<3)
/* Controller has flaky internal state so reset it on each ios change */
#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS		(1<<4)
/* Controller has an unusable DMA engine */
#define SDHCI_QUIRK_BROKEN_DMA				(1<<5)
/* Controller can only DMA from 32-bit aligned addresses */
#define SDHCI_QUIRK_32BIT_DMA_ADDR			(1<<6)
/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
#define SDHCI_QUIRK_32BIT_DMA_SIZE			(1<<7)
/* Controller needs to be reset after each request to stay stable */
#define SDHCI_QUIRK_RESET_AFTER_REQUEST			(1<<8)
  51. static const struct pci_device_id pci_ids[] __devinitdata = {
  52. {
  53. .vendor = PCI_VENDOR_ID_RICOH,
  54. .device = PCI_DEVICE_ID_RICOH_R5C822,
  55. .subvendor = PCI_VENDOR_ID_IBM,
  56. .subdevice = PCI_ANY_ID,
  57. .driver_data = SDHCI_QUIRK_CLOCK_BEFORE_RESET |
  58. SDHCI_QUIRK_FORCE_DMA,
  59. },
  60. {
  61. .vendor = PCI_VENDOR_ID_RICOH,
  62. .device = PCI_DEVICE_ID_RICOH_R5C822,
  63. .subvendor = PCI_ANY_ID,
  64. .subdevice = PCI_ANY_ID,
  65. .driver_data = SDHCI_QUIRK_FORCE_DMA |
  66. SDHCI_QUIRK_NO_CARD_NO_RESET,
  67. },
  68. {
  69. .vendor = PCI_VENDOR_ID_TI,
  70. .device = PCI_DEVICE_ID_TI_XX21_XX11_SD,
  71. .subvendor = PCI_ANY_ID,
  72. .subdevice = PCI_ANY_ID,
  73. .driver_data = SDHCI_QUIRK_FORCE_DMA,
  74. },
  75. {
  76. .vendor = PCI_VENDOR_ID_ENE,
  77. .device = PCI_DEVICE_ID_ENE_CB712_SD,
  78. .subvendor = PCI_ANY_ID,
  79. .subdevice = PCI_ANY_ID,
  80. .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
  81. SDHCI_QUIRK_BROKEN_DMA,
  82. },
  83. {
  84. .vendor = PCI_VENDOR_ID_ENE,
  85. .device = PCI_DEVICE_ID_ENE_CB712_SD_2,
  86. .subvendor = PCI_ANY_ID,
  87. .subdevice = PCI_ANY_ID,
  88. .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
  89. SDHCI_QUIRK_BROKEN_DMA,
  90. },
  91. {
  92. .vendor = PCI_VENDOR_ID_ENE,
  93. .device = PCI_DEVICE_ID_ENE_CB714_SD,
  94. .subvendor = PCI_ANY_ID,
  95. .subdevice = PCI_ANY_ID,
  96. .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
  97. SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
  98. },
  99. {
  100. .vendor = PCI_VENDOR_ID_ENE,
  101. .device = PCI_DEVICE_ID_ENE_CB714_SD_2,
  102. .subvendor = PCI_ANY_ID,
  103. .subdevice = PCI_ANY_ID,
  104. .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
  105. SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
  106. },
  107. {
  108. .vendor = PCI_VENDOR_ID_JMICRON,
  109. .device = PCI_DEVICE_ID_JMICRON_JMB38X_SD,
  110. .subvendor = PCI_ANY_ID,
  111. .subdevice = PCI_ANY_ID,
  112. .driver_data = SDHCI_QUIRK_32BIT_DMA_ADDR |
  113. SDHCI_QUIRK_32BIT_DMA_SIZE |
  114. SDHCI_QUIRK_RESET_AFTER_REQUEST,
  115. },
  116. { /* Generic SD host controller */
  117. PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
  118. },
  119. { /* end: all zeroes */ },
  120. };
  121. MODULE_DEVICE_TABLE(pci, pci_ids);
  122. static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
  123. static void sdhci_finish_data(struct sdhci_host *);
  124. static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
  125. static void sdhci_finish_command(struct sdhci_host *);
  126. static void sdhci_dumpregs(struct sdhci_host *host)
  127. {
  128. printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
  129. printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
  130. readl(host->ioaddr + SDHCI_DMA_ADDRESS),
  131. readw(host->ioaddr + SDHCI_HOST_VERSION));
  132. printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
  133. readw(host->ioaddr + SDHCI_BLOCK_SIZE),
  134. readw(host->ioaddr + SDHCI_BLOCK_COUNT));
  135. printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
  136. readl(host->ioaddr + SDHCI_ARGUMENT),
  137. readw(host->ioaddr + SDHCI_TRANSFER_MODE));
  138. printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
  139. readl(host->ioaddr + SDHCI_PRESENT_STATE),
  140. readb(host->ioaddr + SDHCI_HOST_CONTROL));
  141. printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
  142. readb(host->ioaddr + SDHCI_POWER_CONTROL),
  143. readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL));
  144. printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
  145. readb(host->ioaddr + SDHCI_WAKE_UP_CONTROL),
  146. readw(host->ioaddr + SDHCI_CLOCK_CONTROL));
  147. printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
  148. readb(host->ioaddr + SDHCI_TIMEOUT_CONTROL),
  149. readl(host->ioaddr + SDHCI_INT_STATUS));
  150. printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
  151. readl(host->ioaddr + SDHCI_INT_ENABLE),
  152. readl(host->ioaddr + SDHCI_SIGNAL_ENABLE));
  153. printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
  154. readw(host->ioaddr + SDHCI_ACMD12_ERR),
  155. readw(host->ioaddr + SDHCI_SLOT_INT_STATUS));
  156. printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
  157. readl(host->ioaddr + SDHCI_CAPABILITIES),
  158. readl(host->ioaddr + SDHCI_MAX_CURRENT));
  159. printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
  160. }
  161. /*****************************************************************************\
  162. * *
  163. * Low level functions *
  164. * *
  165. \*****************************************************************************/
  166. static void sdhci_reset(struct sdhci_host *host, u8 mask)
  167. {
  168. unsigned long timeout;
  169. if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
  170. if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
  171. SDHCI_CARD_PRESENT))
  172. return;
  173. }
  174. writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);
  175. if (mask & SDHCI_RESET_ALL)
  176. host->clock = 0;
  177. /* Wait max 100 ms */
  178. timeout = 100;
  179. /* hw clears the bit when it's done */
  180. while (readb(host->ioaddr + SDHCI_SOFTWARE_RESET) & mask) {
  181. if (timeout == 0) {
  182. printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
  183. mmc_hostname(host->mmc), (int)mask);
  184. sdhci_dumpregs(host);
  185. return;
  186. }
  187. timeout--;
  188. mdelay(1);
  189. }
  190. }
  191. static void sdhci_init(struct sdhci_host *host)
  192. {
  193. u32 intmask;
  194. sdhci_reset(host, SDHCI_RESET_ALL);
  195. intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
  196. SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
  197. SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
  198. SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
  199. SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
  200. SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;
  201. writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
  202. writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
  203. }
  204. static void sdhci_activate_led(struct sdhci_host *host)
  205. {
  206. u8 ctrl;
  207. ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
  208. ctrl |= SDHCI_CTRL_LED;
  209. writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
  210. }
  211. static void sdhci_deactivate_led(struct sdhci_host *host)
  212. {
  213. u8 ctrl;
  214. ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
  215. ctrl &= ~SDHCI_CTRL_LED;
  216. writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
  217. }
  218. /*****************************************************************************\
  219. * *
  220. * Core functions *
  221. * *
  222. \*****************************************************************************/
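/*
 * PIO bookkeeping: host->cur_sg points at the scatterlist entry currently
 * being transferred, host->num_sg counts the entries still to go, and
 * host->offset/host->remain track the position within the current entry.
 * The helpers below advance this state as blocks are pushed through the
 * 32-bit buffer data port.
 */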
  223. static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
  224. {
  225. return sg_virt(host->cur_sg);
  226. }
  227. static inline int sdhci_next_sg(struct sdhci_host* host)
  228. {
  229. /*
  230. * Skip to next SG entry.
  231. */
  232. host->cur_sg++;
  233. host->num_sg--;
  234. /*
  235. * Any entries left?
  236. */
  237. if (host->num_sg > 0) {
  238. host->offset = 0;
  239. host->remain = host->cur_sg->length;
  240. }
  241. return host->num_sg;
  242. }
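/*
 * The data port (SDHCI_BUFFER) is always accessed as 32-bit words. On reads
 * the word is unpacked least-significant byte first; on writes bytes are
 * shifted in from the top, so a full four-byte chunk ends up in the same
 * byte order. chunk_remain tracks how many bytes of the current word are
 * still unused.
 */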
  243. static void sdhci_read_block_pio(struct sdhci_host *host)
  244. {
  245. int blksize, chunk_remain;
  246. u32 data;
  247. char *buffer;
  248. int size;
  249. DBG("PIO reading\n");
  250. blksize = host->data->blksz;
  251. chunk_remain = 0;
  252. data = 0;
  253. buffer = sdhci_sg_to_buffer(host) + host->offset;
  254. while (blksize) {
  255. if (chunk_remain == 0) {
  256. data = readl(host->ioaddr + SDHCI_BUFFER);
  257. chunk_remain = min(blksize, 4);
  258. }
  259. size = min(host->remain, chunk_remain);
  260. chunk_remain -= size;
  261. blksize -= size;
  262. host->offset += size;
  263. host->remain -= size;
  264. while (size) {
  265. *buffer = data & 0xFF;
  266. buffer++;
  267. data >>= 8;
  268. size--;
  269. }
  270. if (host->remain == 0) {
  271. if (sdhci_next_sg(host) == 0) {
  272. BUG_ON(blksize != 0);
  273. return;
  274. }
  275. buffer = sdhci_sg_to_buffer(host);
  276. }
  277. }
  278. }
  279. static void sdhci_write_block_pio(struct sdhci_host *host)
  280. {
  281. int blksize, chunk_remain;
  282. u32 data;
  283. char *buffer;
  284. int bytes, size;
  285. DBG("PIO writing\n");
  286. blksize = host->data->blksz;
  287. chunk_remain = 4;
  288. data = 0;
  289. bytes = 0;
  290. buffer = sdhci_sg_to_buffer(host) + host->offset;
  291. while (blksize) {
  292. size = min(host->remain, chunk_remain);
  293. chunk_remain -= size;
  294. blksize -= size;
  295. host->offset += size;
  296. host->remain -= size;
  297. while (size) {
  298. data >>= 8;
  299. data |= (u32)*buffer << 24;
  300. buffer++;
  301. size--;
  302. }
  303. if (chunk_remain == 0) {
  304. writel(data, host->ioaddr + SDHCI_BUFFER);
  305. chunk_remain = min(blksize, 4);
  306. }
  307. if (host->remain == 0) {
  308. if (sdhci_next_sg(host) == 0) {
  309. BUG_ON(blksize != 0);
  310. return;
  311. }
  312. buffer = sdhci_sg_to_buffer(host);
  313. }
  314. }
  315. }
  316. static void sdhci_transfer_pio(struct sdhci_host *host)
  317. {
  318. u32 mask;
  319. BUG_ON(!host->data);
  320. if (host->num_sg == 0)
  321. return;
  322. if (host->data->flags & MMC_DATA_READ)
  323. mask = SDHCI_DATA_AVAILABLE;
  324. else
  325. mask = SDHCI_SPACE_AVAILABLE;
  326. while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
  327. if (host->data->flags & MMC_DATA_READ)
  328. sdhci_read_block_pio(host);
  329. else
  330. sdhci_write_block_pio(host);
  331. if (host->num_sg == 0)
  332. break;
  333. }
  334. DBG("PIO transfer complete.\n");
  335. }
  336. static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
  337. {
  338. u8 count;
  339. unsigned target_timeout, current_timeout;
  340. WARN_ON(host->data);
  341. if (data == NULL)
  342. return;
  343. /* Sanity checks */
  344. BUG_ON(data->blksz * data->blocks > 524288);
  345. BUG_ON(data->blksz > host->mmc->max_blk_size);
  346. BUG_ON(data->blocks > 65535);
  347. host->data = data;
  348. host->data_early = 0;
  349. /* timeout in us */
  350. target_timeout = data->timeout_ns / 1000 +
  351. data->timeout_clks / host->clock;
  352. /*
  353. * Figure out needed cycles.
  354. * We do this in steps in order to fit inside a 32 bit int.
  355. * The first step is the minimum timeout, which will have a
  356. * minimum resolution of 6 bits:
  357. * (1) 2^13*1000 > 2^22,
  358. * (2) host->timeout_clk < 2^16
  359. * =>
  360. * (1) / (2) > 2^6
  361. */
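	/*
	 * Illustrative example (not from the spec): with timeout_clk at
	 * 1000 kHz the smallest setting, count = 0, gives
	 * (1 << 13) * 1000 / 1000 = 8192 us, and every increment of count
	 * doubles that, up to the maximum of count = 0xE used below.
	 */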
  362. count = 0;
  363. current_timeout = (1 << 13) * 1000 / host->timeout_clk;
  364. while (current_timeout < target_timeout) {
  365. count++;
  366. current_timeout <<= 1;
  367. if (count >= 0xF)
  368. break;
  369. }
  370. if (count >= 0xF) {
  371. printk(KERN_WARNING "%s: Too large timeout requested!\n",
  372. mmc_hostname(host->mmc));
  373. count = 0xE;
  374. }
  375. writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);
  376. if (host->flags & SDHCI_USE_DMA)
  377. host->flags |= SDHCI_REQ_USE_DMA;
  378. if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
  379. (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
  380. ((data->blksz * data->blocks) & 0x3))) {
  381. DBG("Reverting to PIO because of transfer size (%d)\n",
  382. data->blksz * data->blocks);
  383. host->flags &= ~SDHCI_REQ_USE_DMA;
  384. }
  385. /*
  386. * The assumption here being that alignment is the same after
  387. * translation to device address space.
  388. */
  389. if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
  390. (host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
  391. (data->sg->offset & 0x3))) {
  392. DBG("Reverting to PIO because of bad alignment\n");
  393. host->flags &= ~SDHCI_REQ_USE_DMA;
  394. }
  395. if (host->flags & SDHCI_REQ_USE_DMA) {
  396. int count;
  397. count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len,
  398. (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
  399. BUG_ON(count != 1);
  400. writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS);
  401. } else {
  402. host->cur_sg = data->sg;
  403. host->num_sg = data->sg_len;
  404. host->offset = 0;
  405. host->remain = host->cur_sg->length;
  406. }
  407. /* We do not handle DMA boundaries, so set it to max (512 KiB) */
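	/*
	 * Boundary argument 7 selects the largest SDMA buffer boundary the
	 * spec defines (4 KiB << 7 = 512 KiB); bits 11:0 of the register
	 * carry the actual block size.
	 */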
  408. writew(SDHCI_MAKE_BLKSZ(7, data->blksz),
  409. host->ioaddr + SDHCI_BLOCK_SIZE);
  410. writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
  411. }
  412. static void sdhci_set_transfer_mode(struct sdhci_host *host,
  413. struct mmc_data *data)
  414. {
  415. u16 mode;
  416. if (data == NULL)
  417. return;
  418. WARN_ON(!host->data);
  419. mode = SDHCI_TRNS_BLK_CNT_EN;
  420. if (data->blocks > 1)
  421. mode |= SDHCI_TRNS_MULTI;
  422. if (data->flags & MMC_DATA_READ)
  423. mode |= SDHCI_TRNS_READ;
  424. if (host->flags & SDHCI_REQ_USE_DMA)
  425. mode |= SDHCI_TRNS_DMA;
  426. writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
  427. }
  428. static void sdhci_finish_data(struct sdhci_host *host)
  429. {
  430. struct mmc_data *data;
  431. u16 blocks;
  432. BUG_ON(!host->data);
  433. data = host->data;
  434. host->data = NULL;
  435. if (host->flags & SDHCI_REQ_USE_DMA) {
  436. pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
  437. (data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
  438. }
  439. /*
  440. * Controller doesn't count down when in single block mode.
  441. */
  442. if (data->blocks == 1)
  443. blocks = (data->error == 0) ? 0 : 1;
  444. else
  445. blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT);
  446. data->bytes_xfered = data->blksz * (data->blocks - blocks);
  447. if (!data->error && blocks) {
  448. printk(KERN_ERR "%s: Controller signalled completion even "
  449. "though there were blocks left.\n",
  450. mmc_hostname(host->mmc));
  451. data->error = -EIO;
  452. }
  453. if (data->stop) {
  454. /*
  455. * The controller needs a reset of internal state machines
  456. * upon error conditions.
  457. */
  458. if (data->error) {
  459. sdhci_reset(host, SDHCI_RESET_CMD);
  460. sdhci_reset(host, SDHCI_RESET_DATA);
  461. }
  462. sdhci_send_command(host, data->stop);
  463. } else
  464. tasklet_schedule(&host->finish_tasklet);
  465. }
  466. static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
  467. {
  468. int flags;
  469. u32 mask;
  470. unsigned long timeout;
  471. WARN_ON(host->cmd);
  472. /* Wait max 10 ms */
  473. timeout = 10;
  474. mask = SDHCI_CMD_INHIBIT;
  475. if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
  476. mask |= SDHCI_DATA_INHIBIT;
	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
  479. if (host->mrq->data && (cmd == host->mrq->data->stop))
  480. mask &= ~SDHCI_DATA_INHIBIT;
  481. while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
  482. if (timeout == 0) {
  483. printk(KERN_ERR "%s: Controller never released "
  484. "inhibit bit(s).\n", mmc_hostname(host->mmc));
  485. sdhci_dumpregs(host);
  486. cmd->error = -EIO;
  487. tasklet_schedule(&host->finish_tasklet);
  488. return;
  489. }
  490. timeout--;
  491. mdelay(1);
  492. }
  493. mod_timer(&host->timer, jiffies + 10 * HZ);
  494. host->cmd = cmd;
  495. sdhci_prepare_data(host, cmd->data);
  496. writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT);
  497. sdhci_set_transfer_mode(host, cmd->data);
  498. if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
  499. printk(KERN_ERR "%s: Unsupported response type!\n",
  500. mmc_hostname(host->mmc));
  501. cmd->error = -EINVAL;
  502. tasklet_schedule(&host->finish_tasklet);
  503. return;
  504. }
  505. if (!(cmd->flags & MMC_RSP_PRESENT))
  506. flags = SDHCI_CMD_RESP_NONE;
  507. else if (cmd->flags & MMC_RSP_136)
  508. flags = SDHCI_CMD_RESP_LONG;
  509. else if (cmd->flags & MMC_RSP_BUSY)
  510. flags = SDHCI_CMD_RESP_SHORT_BUSY;
  511. else
  512. flags = SDHCI_CMD_RESP_SHORT;
  513. if (cmd->flags & MMC_RSP_CRC)
  514. flags |= SDHCI_CMD_CRC;
  515. if (cmd->flags & MMC_RSP_OPCODE)
  516. flags |= SDHCI_CMD_INDEX;
  517. if (cmd->data)
  518. flags |= SDHCI_CMD_DATA;
  519. writew(SDHCI_MAKE_CMD(cmd->opcode, flags),
  520. host->ioaddr + SDHCI_COMMAND);
  521. }
  522. static void sdhci_finish_command(struct sdhci_host *host)
  523. {
  524. int i;
  525. BUG_ON(host->cmd == NULL);
  526. if (host->cmd->flags & MMC_RSP_PRESENT) {
  527. if (host->cmd->flags & MMC_RSP_136) {
  528. /* CRC is stripped so we need to do some shifting. */
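			/*
			 * The controller drops the CRC byte from the 136-bit
			 * response, so each 32-bit word is shifted up by 8
			 * and the top byte of the next lower response word
			 * is OR:ed into the low byte to rebuild the layout
			 * the MMC core expects.
			 */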
  529. for (i = 0;i < 4;i++) {
  530. host->cmd->resp[i] = readl(host->ioaddr +
  531. SDHCI_RESPONSE + (3-i)*4) << 8;
  532. if (i != 3)
  533. host->cmd->resp[i] |=
  534. readb(host->ioaddr +
  535. SDHCI_RESPONSE + (3-i)*4-1);
  536. }
  537. } else {
  538. host->cmd->resp[0] = readl(host->ioaddr + SDHCI_RESPONSE);
  539. }
  540. }
  541. host->cmd->error = 0;
  542. if (host->data && host->data_early)
  543. sdhci_finish_data(host);
  544. if (!host->cmd->data)
  545. tasklet_schedule(&host->finish_tasklet);
  546. host->cmd = NULL;
  547. }
  548. static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
  549. {
  550. int div;
  551. u16 clk;
  552. unsigned long timeout;
  553. if (clock == host->clock)
  554. return;
  555. writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
  556. if (clock == 0)
  557. goto out;
  558. for (div = 1;div < 256;div *= 2) {
  559. if ((host->max_clk / div) <= clock)
  560. break;
  561. }
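	/*
	 * The SDHCI 1.0 divider field encodes base_clock / (2 * N), with
	 * N = 0 meaning the undivided base clock, which is why the
	 * power-of-two divisor just found is halved below before being
	 * written to the register.
	 */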
  562. div >>= 1;
  563. clk = div << SDHCI_DIVIDER_SHIFT;
  564. clk |= SDHCI_CLOCK_INT_EN;
  565. writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);
  566. /* Wait max 10 ms */
  567. timeout = 10;
  568. while (!((clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL))
  569. & SDHCI_CLOCK_INT_STABLE)) {
  570. if (timeout == 0) {
  571. printk(KERN_ERR "%s: Internal clock never "
  572. "stabilised.\n", mmc_hostname(host->mmc));
  573. sdhci_dumpregs(host);
  574. return;
  575. }
  576. timeout--;
  577. mdelay(1);
  578. }
  579. clk |= SDHCI_CLOCK_CARD_EN;
  580. writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);
  581. out:
  582. host->clock = clock;
  583. }
  584. static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
  585. {
  586. u8 pwr;
  587. if (host->power == power)
  588. return;
  589. if (power == (unsigned short)-1) {
  590. writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
  591. goto out;
  592. }
  593. /*
  594. * Spec says that we should clear the power reg before setting
  595. * a new value. Some controllers don't seem to like this though.
  596. */
  597. if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
  598. writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
  599. pwr = SDHCI_POWER_ON;
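	/*
	 * 'power' arrives as a bit number in the OCR voltage mask (ios->vdd),
	 * so 1 << power recovers the MMC_VDD_* range being requested.
	 */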
  600. switch (1 << power) {
  601. case MMC_VDD_165_195:
  602. pwr |= SDHCI_POWER_180;
  603. break;
  604. case MMC_VDD_29_30:
  605. case MMC_VDD_30_31:
  606. pwr |= SDHCI_POWER_300;
  607. break;
  608. case MMC_VDD_32_33:
  609. case MMC_VDD_33_34:
  610. pwr |= SDHCI_POWER_330;
  611. break;
  612. default:
  613. BUG();
  614. }
  615. writeb(pwr, host->ioaddr + SDHCI_POWER_CONTROL);
  616. out:
  617. host->power = power;
  618. }
  619. /*****************************************************************************\
  620. * *
  621. * MMC callbacks *
  622. * *
  623. \*****************************************************************************/
  624. static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
  625. {
  626. struct sdhci_host *host;
  627. unsigned long flags;
  628. host = mmc_priv(mmc);
  629. spin_lock_irqsave(&host->lock, flags);
  630. WARN_ON(host->mrq != NULL);
  631. sdhci_activate_led(host);
  632. host->mrq = mrq;
  633. if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
  634. host->mrq->cmd->error = -ENOMEDIUM;
  635. tasklet_schedule(&host->finish_tasklet);
  636. } else
  637. sdhci_send_command(host, mrq->cmd);
  638. mmiowb();
  639. spin_unlock_irqrestore(&host->lock, flags);
  640. }
  641. static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
  642. {
  643. struct sdhci_host *host;
  644. unsigned long flags;
  645. u8 ctrl;
  646. host = mmc_priv(mmc);
  647. spin_lock_irqsave(&host->lock, flags);
  648. /*
  649. * Reset the chip on each power off.
  650. * Should clear out any weird states.
  651. */
  652. if (ios->power_mode == MMC_POWER_OFF) {
  653. writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE);
  654. sdhci_init(host);
  655. }
  656. sdhci_set_clock(host, ios->clock);
  657. if (ios->power_mode == MMC_POWER_OFF)
  658. sdhci_set_power(host, -1);
  659. else
  660. sdhci_set_power(host, ios->vdd);
  661. ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
  662. if (ios->bus_width == MMC_BUS_WIDTH_4)
  663. ctrl |= SDHCI_CTRL_4BITBUS;
  664. else
  665. ctrl &= ~SDHCI_CTRL_4BITBUS;
  666. if (ios->timing == MMC_TIMING_SD_HS)
  667. ctrl |= SDHCI_CTRL_HISPD;
  668. else
  669. ctrl &= ~SDHCI_CTRL_HISPD;
  670. writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
  671. /*
  672. * Some (ENE) controllers go apeshit on some ios operation,
  673. * signalling timeout and CRC errors even on CMD0. Resetting
  674. * it on each ios seems to solve the problem.
  675. */
  676. if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
  677. sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
  678. mmiowb();
  679. spin_unlock_irqrestore(&host->lock, flags);
  680. }
  681. static int sdhci_get_ro(struct mmc_host *mmc)
  682. {
  683. struct sdhci_host *host;
  684. unsigned long flags;
  685. int present;
  686. host = mmc_priv(mmc);
  687. spin_lock_irqsave(&host->lock, flags);
  688. present = readl(host->ioaddr + SDHCI_PRESENT_STATE);
  689. spin_unlock_irqrestore(&host->lock, flags);
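	/*
	 * In the present-state register the write-protect bit reads 1 when
	 * writing is enabled, so the value is inverted to report read-only.
	 */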
  690. return !(present & SDHCI_WRITE_PROTECT);
  691. }
  692. static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
  693. {
  694. struct sdhci_host *host;
  695. unsigned long flags;
  696. u32 ier;
  697. host = mmc_priv(mmc);
  698. spin_lock_irqsave(&host->lock, flags);
  699. ier = readl(host->ioaddr + SDHCI_INT_ENABLE);
  700. ier &= ~SDHCI_INT_CARD_INT;
  701. if (enable)
  702. ier |= SDHCI_INT_CARD_INT;
  703. writel(ier, host->ioaddr + SDHCI_INT_ENABLE);
  704. writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);
  705. mmiowb();
  706. spin_unlock_irqrestore(&host->lock, flags);
  707. }
  708. static const struct mmc_host_ops sdhci_ops = {
  709. .request = sdhci_request,
  710. .set_ios = sdhci_set_ios,
  711. .get_ro = sdhci_get_ro,
  712. .enable_sdio_irq = sdhci_enable_sdio_irq,
  713. };
  714. /*****************************************************************************\
  715. * *
  716. * Tasklets *
  717. * *
  718. \*****************************************************************************/
  719. static void sdhci_tasklet_card(unsigned long param)
  720. {
  721. struct sdhci_host *host;
  722. unsigned long flags;
  723. host = (struct sdhci_host*)param;
  724. spin_lock_irqsave(&host->lock, flags);
  725. if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
  726. if (host->mrq) {
  727. printk(KERN_ERR "%s: Card removed during transfer!\n",
  728. mmc_hostname(host->mmc));
  729. printk(KERN_ERR "%s: Resetting controller.\n",
  730. mmc_hostname(host->mmc));
  731. sdhci_reset(host, SDHCI_RESET_CMD);
  732. sdhci_reset(host, SDHCI_RESET_DATA);
  733. host->mrq->cmd->error = -ENOMEDIUM;
  734. tasklet_schedule(&host->finish_tasklet);
  735. }
  736. }
  737. spin_unlock_irqrestore(&host->lock, flags);
  738. mmc_detect_change(host->mmc, msecs_to_jiffies(500));
  739. }
  740. static void sdhci_tasklet_finish(unsigned long param)
  741. {
  742. struct sdhci_host *host;
  743. unsigned long flags;
  744. struct mmc_request *mrq;
  745. host = (struct sdhci_host*)param;
  746. spin_lock_irqsave(&host->lock, flags);
  747. del_timer(&host->timer);
  748. mrq = host->mrq;
  749. /*
  750. * The controller needs a reset of internal state machines
  751. * upon error conditions.
  752. */
  753. if (mrq->cmd->error ||
  754. (mrq->data && (mrq->data->error ||
  755. (mrq->data->stop && mrq->data->stop->error))) ||
  756. (host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) {
  757. /* Some controllers need this kick or reset won't work here */
  758. if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
  759. unsigned int clock;
  760. /* This is to force an update */
  761. clock = host->clock;
  762. host->clock = 0;
  763. sdhci_set_clock(host, clock);
  764. }
  765. /* Spec says we should do both at the same time, but Ricoh
  766. controllers do not like that. */
  767. sdhci_reset(host, SDHCI_RESET_CMD);
  768. sdhci_reset(host, SDHCI_RESET_DATA);
  769. }
  770. host->mrq = NULL;
  771. host->cmd = NULL;
  772. host->data = NULL;
  773. sdhci_deactivate_led(host);
  774. mmiowb();
  775. spin_unlock_irqrestore(&host->lock, flags);
  776. mmc_request_done(host->mmc, mrq);
  777. }
  778. static void sdhci_timeout_timer(unsigned long data)
  779. {
  780. struct sdhci_host *host;
  781. unsigned long flags;
  782. host = (struct sdhci_host*)data;
  783. spin_lock_irqsave(&host->lock, flags);
  784. if (host->mrq) {
  785. printk(KERN_ERR "%s: Timeout waiting for hardware "
  786. "interrupt.\n", mmc_hostname(host->mmc));
  787. sdhci_dumpregs(host);
  788. if (host->data) {
  789. host->data->error = -ETIMEDOUT;
  790. sdhci_finish_data(host);
  791. } else {
  792. if (host->cmd)
  793. host->cmd->error = -ETIMEDOUT;
  794. else
  795. host->mrq->cmd->error = -ETIMEDOUT;
  796. tasklet_schedule(&host->finish_tasklet);
  797. }
  798. }
  799. mmiowb();
  800. spin_unlock_irqrestore(&host->lock, flags);
  801. }
  802. /*****************************************************************************\
  803. * *
  804. * Interrupt handling *
  805. * *
  806. \*****************************************************************************/
  807. static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
  808. {
  809. BUG_ON(intmask == 0);
  810. if (!host->cmd) {
  811. printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
  812. "though no command operation was in progress.\n",
  813. mmc_hostname(host->mmc), (unsigned)intmask);
  814. sdhci_dumpregs(host);
  815. return;
  816. }
  817. if (intmask & SDHCI_INT_TIMEOUT)
  818. host->cmd->error = -ETIMEDOUT;
  819. else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
  820. SDHCI_INT_INDEX))
  821. host->cmd->error = -EILSEQ;
  822. if (host->cmd->error)
  823. tasklet_schedule(&host->finish_tasklet);
  824. else if (intmask & SDHCI_INT_RESPONSE)
  825. sdhci_finish_command(host);
  826. }
  827. static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
  828. {
  829. BUG_ON(intmask == 0);
  830. if (!host->data) {
  831. /*
  832. * A data end interrupt is sent together with the response
  833. * for the stop command.
  834. */
  835. if (intmask & SDHCI_INT_DATA_END)
  836. return;
  837. printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
  838. "though no data operation was in progress.\n",
  839. mmc_hostname(host->mmc), (unsigned)intmask);
  840. sdhci_dumpregs(host);
  841. return;
  842. }
  843. if (intmask & SDHCI_INT_DATA_TIMEOUT)
  844. host->data->error = -ETIMEDOUT;
  845. else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
  846. host->data->error = -EILSEQ;
  847. if (host->data->error)
  848. sdhci_finish_data(host);
  849. else {
  850. if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
  851. sdhci_transfer_pio(host);
  852. /*
  853. * We currently don't do anything fancy with DMA
  854. * boundaries, but as we can't disable the feature
  855. * we need to at least restart the transfer.
  856. */
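		/*
		 * Writing the current address back into the SDMA address
		 * register tells the controller to continue the transfer
		 * from where the boundary interrupt paused it.
		 */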
  857. if (intmask & SDHCI_INT_DMA_END)
  858. writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS),
  859. host->ioaddr + SDHCI_DMA_ADDRESS);
  860. if (intmask & SDHCI_INT_DATA_END) {
  861. if (host->cmd) {
  862. /*
  863. * Data managed to finish before the
  864. * command completed. Make sure we do
  865. * things in the proper order.
  866. */
  867. host->data_early = 1;
  868. } else {
  869. sdhci_finish_data(host);
  870. }
  871. }
  872. }
  873. }
  874. static irqreturn_t sdhci_irq(int irq, void *dev_id)
  875. {
  876. irqreturn_t result;
  877. struct sdhci_host* host = dev_id;
  878. u32 intmask;
  879. int cardint = 0;
  880. spin_lock(&host->lock);
  881. intmask = readl(host->ioaddr + SDHCI_INT_STATUS);
  882. if (!intmask || intmask == 0xffffffff) {
  883. result = IRQ_NONE;
  884. goto out;
  885. }
  886. DBG("*** %s got interrupt: 0x%08x\n", host->slot_descr, intmask);
  887. if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
  888. writel(intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
  889. host->ioaddr + SDHCI_INT_STATUS);
  890. tasklet_schedule(&host->card_tasklet);
  891. }
  892. intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
  893. if (intmask & SDHCI_INT_CMD_MASK) {
  894. writel(intmask & SDHCI_INT_CMD_MASK,
  895. host->ioaddr + SDHCI_INT_STATUS);
  896. sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
  897. }
  898. if (intmask & SDHCI_INT_DATA_MASK) {
  899. writel(intmask & SDHCI_INT_DATA_MASK,
  900. host->ioaddr + SDHCI_INT_STATUS);
  901. sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
  902. }
  903. intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
  904. intmask &= ~SDHCI_INT_ERROR;
  905. if (intmask & SDHCI_INT_BUS_POWER) {
  906. printk(KERN_ERR "%s: Card is consuming too much power!\n",
  907. mmc_hostname(host->mmc));
  908. writel(SDHCI_INT_BUS_POWER, host->ioaddr + SDHCI_INT_STATUS);
  909. }
  910. intmask &= ~SDHCI_INT_BUS_POWER;
  911. if (intmask & SDHCI_INT_CARD_INT)
  912. cardint = 1;
  913. intmask &= ~SDHCI_INT_CARD_INT;
  914. if (intmask) {
  915. printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
  916. mmc_hostname(host->mmc), intmask);
  917. sdhci_dumpregs(host);
  918. writel(intmask, host->ioaddr + SDHCI_INT_STATUS);
  919. }
  920. result = IRQ_HANDLED;
  921. mmiowb();
  922. out:
  923. spin_unlock(&host->lock);
  924. /*
  925. * We have to delay this as it calls back into the driver.
  926. */
  927. if (cardint)
  928. mmc_signal_sdio_irq(host->mmc);
  929. return result;
  930. }
  931. /*****************************************************************************\
  932. * *
  933. * Suspend/resume *
  934. * *
  935. \*****************************************************************************/
  936. #ifdef CONFIG_PM
  937. static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state)
  938. {
  939. struct sdhci_chip *chip;
  940. int i, ret;
  941. chip = pci_get_drvdata(pdev);
  942. if (!chip)
  943. return 0;
  944. DBG("Suspending...\n");
  945. for (i = 0;i < chip->num_slots;i++) {
  946. if (!chip->hosts[i])
  947. continue;
  948. ret = mmc_suspend_host(chip->hosts[i]->mmc, state);
  949. if (ret) {
  950. for (i--;i >= 0;i--)
  951. mmc_resume_host(chip->hosts[i]->mmc);
  952. return ret;
  953. }
  954. }
  955. pci_save_state(pdev);
  956. pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
  957. for (i = 0;i < chip->num_slots;i++) {
  958. if (!chip->hosts[i])
  959. continue;
  960. free_irq(chip->hosts[i]->irq, chip->hosts[i]);
  961. }
  962. pci_disable_device(pdev);
  963. pci_set_power_state(pdev, pci_choose_state(pdev, state));
  964. return 0;
  965. }
  966. static int sdhci_resume (struct pci_dev *pdev)
  967. {
  968. struct sdhci_chip *chip;
  969. int i, ret;
  970. chip = pci_get_drvdata(pdev);
  971. if (!chip)
  972. return 0;
  973. DBG("Resuming...\n");
  974. pci_set_power_state(pdev, PCI_D0);
  975. pci_restore_state(pdev);
  976. ret = pci_enable_device(pdev);
  977. if (ret)
  978. return ret;
  979. for (i = 0;i < chip->num_slots;i++) {
  980. if (!chip->hosts[i])
  981. continue;
  982. if (chip->hosts[i]->flags & SDHCI_USE_DMA)
  983. pci_set_master(pdev);
  984. ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
  985. IRQF_SHARED, chip->hosts[i]->slot_descr,
  986. chip->hosts[i]);
  987. if (ret)
  988. return ret;
  989. sdhci_init(chip->hosts[i]);
  990. mmiowb();
  991. ret = mmc_resume_host(chip->hosts[i]->mmc);
  992. if (ret)
  993. return ret;
  994. }
  995. return 0;
  996. }
  997. #else /* CONFIG_PM */
  998. #define sdhci_suspend NULL
  999. #define sdhci_resume NULL
  1000. #endif /* CONFIG_PM */
  1001. /*****************************************************************************\
  1002. * *
  1003. * Device probing/removal *
  1004. * *
  1005. \*****************************************************************************/
  1006. static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
  1007. {
  1008. int ret;
  1009. unsigned int version;
  1010. struct sdhci_chip *chip;
  1011. struct mmc_host *mmc;
  1012. struct sdhci_host *host;
  1013. u8 first_bar;
  1014. unsigned int caps;
  1015. chip = pci_get_drvdata(pdev);
  1016. BUG_ON(!chip);
  1017. ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
  1018. if (ret)
  1019. return ret;
  1020. first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
  1021. if (first_bar > 5) {
  1022. printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n");
  1023. return -ENODEV;
  1024. }
  1025. if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) {
  1026. printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n");
  1027. return -ENODEV;
  1028. }
  1029. if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
  1030. printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
  1031. "You may experience problems.\n");
  1032. }
  1033. if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
  1034. printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n");
  1035. return -ENODEV;
  1036. }
  1037. if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
  1038. printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");
  1039. return -ENODEV;
  1040. }
  1041. mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev);
  1042. if (!mmc)
  1043. return -ENOMEM;
  1044. host = mmc_priv(mmc);
  1045. host->mmc = mmc;
  1046. host->chip = chip;
  1047. chip->hosts[slot] = host;
  1048. host->bar = first_bar + slot;
  1049. host->addr = pci_resource_start(pdev, host->bar);
  1050. host->irq = pdev->irq;
  1051. DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq);
  1052. snprintf(host->slot_descr, 20, "sdhc%d:slot%d", chip->index, slot);
  1053. ret = pci_request_region(pdev, host->bar, host->slot_descr);
  1054. if (ret)
  1055. goto free;
  1056. host->ioaddr = ioremap_nocache(host->addr,
  1057. pci_resource_len(pdev, host->bar));
  1058. if (!host->ioaddr) {
  1059. ret = -ENOMEM;
  1060. goto release;
  1061. }
  1062. sdhci_reset(host, SDHCI_RESET_ALL);
  1063. version = readw(host->ioaddr + SDHCI_HOST_VERSION);
  1064. version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
  1065. if (version > 1) {
  1066. printk(KERN_ERR "%s: Unknown controller version (%d). "
  1067. "You may experience problems.\n", host->slot_descr,
  1068. version);
  1069. }
  1070. caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
  1071. if (chip->quirks & SDHCI_QUIRK_FORCE_DMA)
  1072. host->flags |= SDHCI_USE_DMA;
  1073. else if (!(caps & SDHCI_CAN_DO_DMA))
  1074. DBG("Controller doesn't have DMA capability\n");
  1075. else
  1076. host->flags |= SDHCI_USE_DMA;
  1077. if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
  1078. (host->flags & SDHCI_USE_DMA)) {
  1079. DBG("Disabling DMA as it is marked broken\n");
  1080. host->flags &= ~SDHCI_USE_DMA;
  1081. }
  1082. if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
  1083. (host->flags & SDHCI_USE_DMA)) {
  1084. printk(KERN_WARNING "%s: Will use DMA "
  1085. "mode even though HW doesn't fully "
  1086. "claim to support it.\n", host->slot_descr);
  1087. }
  1088. if (host->flags & SDHCI_USE_DMA) {
  1089. if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
  1090. printk(KERN_WARNING "%s: No suitable DMA available. "
  1091. "Falling back to PIO.\n", host->slot_descr);
  1092. host->flags &= ~SDHCI_USE_DMA;
  1093. }
  1094. }
  1095. if (host->flags & SDHCI_USE_DMA)
  1096. pci_set_master(pdev);
  1097. else /* XXX: Hack to get MMC layer to avoid highmem */
  1098. pdev->dma_mask = 0;
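	/*
	 * The capabilities register reports the base SD clock in MHz and the
	 * timeout clock in kHz (or MHz when the unit bit is set), hence the
	 * scaling applied below.
	 */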
  1099. host->max_clk =
  1100. (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
  1101. if (host->max_clk == 0) {
  1102. printk(KERN_ERR "%s: Hardware doesn't specify base clock "
  1103. "frequency.\n", host->slot_descr);
  1104. ret = -ENODEV;
  1105. goto unmap;
  1106. }
  1107. host->max_clk *= 1000000;
  1108. host->timeout_clk =
  1109. (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
  1110. if (host->timeout_clk == 0) {
  1111. printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
  1112. "frequency.\n", host->slot_descr);
  1113. ret = -ENODEV;
  1114. goto unmap;
  1115. }
  1116. if (caps & SDHCI_TIMEOUT_CLK_UNIT)
  1117. host->timeout_clk *= 1000;
  1118. /*
  1119. * Set host parameters.
  1120. */
  1121. mmc->ops = &sdhci_ops;
  1122. mmc->f_min = host->max_clk / 256;
  1123. mmc->f_max = host->max_clk;
  1124. mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ;
  1125. if (caps & SDHCI_CAN_DO_HISPD)
  1126. mmc->caps |= MMC_CAP_SD_HIGHSPEED;
  1127. mmc->ocr_avail = 0;
  1128. if (caps & SDHCI_CAN_VDD_330)
  1129. mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
  1130. if (caps & SDHCI_CAN_VDD_300)
  1131. mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
  1132. if (caps & SDHCI_CAN_VDD_180)
  1133. mmc->ocr_avail |= MMC_VDD_165_195;
  1134. if (mmc->ocr_avail == 0) {
  1135. printk(KERN_ERR "%s: Hardware doesn't report any "
  1136. "support voltages.\n", host->slot_descr);
  1137. ret = -ENODEV;
  1138. goto unmap;
  1139. }
  1140. spin_lock_init(&host->lock);
  1141. /*
  1142. * Maximum number of segments. Hardware cannot do scatter lists.
  1143. */
  1144. if (host->flags & SDHCI_USE_DMA)
  1145. mmc->max_hw_segs = 1;
  1146. else
  1147. mmc->max_hw_segs = 16;
  1148. mmc->max_phys_segs = 16;
  1149. /*
  1150. * Maximum number of sectors in one transfer. Limited by DMA boundary
  1151. * size (512KiB).
  1152. */
  1153. mmc->max_req_size = 524288;
  1154. /*
  1155. * Maximum segment size. Could be one segment with the maximum number
  1156. * of bytes.
  1157. */
  1158. mmc->max_seg_size = mmc->max_req_size;
  1159. /*
  1160. * Maximum block size. This varies from controller to controller and
  1161. * is specified in the capabilities register.
  1162. */
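	/*
	 * The capabilities field is two bits wide: 0, 1 and 2 select 512,
	 * 1024 and 2048 byte blocks, while 3 is reserved, hence the check
	 * below.
	 */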
  1163. mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
  1164. if (mmc->max_blk_size >= 3) {
  1165. printk(KERN_WARNING "%s: Invalid maximum block size, assuming 512\n",
  1166. host->slot_descr);
  1167. mmc->max_blk_size = 512;
  1168. } else
  1169. mmc->max_blk_size = 512 << mmc->max_blk_size;
  1170. /*
  1171. * Maximum block count.
  1172. */
  1173. mmc->max_blk_count = 65535;
  1174. /*
  1175. * Init tasklets.
  1176. */
  1177. tasklet_init(&host->card_tasklet,
  1178. sdhci_tasklet_card, (unsigned long)host);
  1179. tasklet_init(&host->finish_tasklet,
  1180. sdhci_tasklet_finish, (unsigned long)host);
  1181. setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
  1182. ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
  1183. host->slot_descr, host);
  1184. if (ret)
  1185. goto untasklet;
  1186. sdhci_init(host);
  1187. #ifdef CONFIG_MMC_DEBUG
  1188. sdhci_dumpregs(host);
  1189. #endif
  1190. mmiowb();
  1191. mmc_add_host(mmc);
  1192. printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", mmc_hostname(mmc),
  1193. host->addr, host->irq,
  1194. (host->flags & SDHCI_USE_DMA)?"DMA":"PIO");
  1195. return 0;
  1196. untasklet:
  1197. tasklet_kill(&host->card_tasklet);
  1198. tasklet_kill(&host->finish_tasklet);
  1199. unmap:
  1200. iounmap(host->ioaddr);
  1201. release:
  1202. pci_release_region(pdev, host->bar);
  1203. free:
  1204. mmc_free_host(mmc);
  1205. return ret;
  1206. }
  1207. static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
  1208. {
  1209. struct sdhci_chip *chip;
  1210. struct mmc_host *mmc;
  1211. struct sdhci_host *host;
  1212. chip = pci_get_drvdata(pdev);
  1213. host = chip->hosts[slot];
  1214. mmc = host->mmc;
  1215. chip->hosts[slot] = NULL;
  1216. mmc_remove_host(mmc);
  1217. sdhci_reset(host, SDHCI_RESET_ALL);
  1218. free_irq(host->irq, host);
  1219. del_timer_sync(&host->timer);
  1220. tasklet_kill(&host->card_tasklet);
  1221. tasklet_kill(&host->finish_tasklet);
  1222. iounmap(host->ioaddr);
  1223. pci_release_region(pdev, host->bar);
  1224. mmc_free_host(mmc);
  1225. }
  1226. static int __devinit sdhci_probe(struct pci_dev *pdev,
  1227. const struct pci_device_id *ent)
  1228. {
  1229. int ret, i;
  1230. u8 slots, rev;
  1231. struct sdhci_chip *chip;
  1232. BUG_ON(pdev == NULL);
  1233. BUG_ON(ent == NULL);
  1234. pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
  1235. printk(KERN_INFO DRIVER_NAME
  1236. ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
  1237. pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
  1238. (int)rev);
  1239. ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
  1240. if (ret)
  1241. return ret;
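	/*
	 * PCI_SLOT_INFO encodes the number of slots minus one (alongside the
	 * number of the first BAR), so one is added back here.
	 */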
  1242. slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
  1243. DBG("found %d slot(s)\n", slots);
  1244. if (slots == 0)
  1245. return -ENODEV;
  1246. ret = pci_enable_device(pdev);
  1247. if (ret)
  1248. return ret;
  1249. chip = kzalloc(sizeof(struct sdhci_chip) +
  1250. sizeof(struct sdhci_host*) * slots, GFP_KERNEL);
  1251. if (!chip) {
  1252. ret = -ENOMEM;
  1253. goto err;
  1254. }
  1255. chip->pdev = pdev;
  1256. chip->quirks = ent->driver_data;
  1257. if (debug_quirks)
  1258. chip->quirks = debug_quirks;
  1259. chip->num_slots = slots;
  1260. pci_set_drvdata(pdev, chip);
  1261. /* Add for multi controller case */
  1262. spin_lock(&index_lock);
  1263. chip->index = chip_index++;
  1264. spin_unlock(&index_lock);
  1265. for (i = 0;i < slots;i++) {
  1266. ret = sdhci_probe_slot(pdev, i);
  1267. if (ret) {
  1268. for (i--;i >= 0;i--)
  1269. sdhci_remove_slot(pdev, i);
  1270. goto free;
  1271. }
  1272. }
  1273. return 0;
  1274. free:
  1275. pci_set_drvdata(pdev, NULL);
  1276. kfree(chip);
  1277. err:
  1278. pci_disable_device(pdev);
  1279. return ret;
  1280. }
  1281. static void __devexit sdhci_remove(struct pci_dev *pdev)
  1282. {
  1283. int i;
  1284. struct sdhci_chip *chip;
  1285. chip = pci_get_drvdata(pdev);
  1286. if (chip) {
  1287. for (i = 0;i < chip->num_slots;i++)
  1288. sdhci_remove_slot(pdev, i);
  1289. pci_set_drvdata(pdev, NULL);
  1290. kfree(chip);
  1291. }
  1292. pci_disable_device(pdev);
  1293. }
  1294. static struct pci_driver sdhci_driver = {
  1295. .name = DRIVER_NAME,
  1296. .id_table = pci_ids,
  1297. .probe = sdhci_probe,
  1298. .remove = __devexit_p(sdhci_remove),
  1299. .suspend = sdhci_suspend,
  1300. .resume = sdhci_resume,
  1301. };
  1302. /*****************************************************************************\
  1303. * *
  1304. * Driver init/exit *
  1305. * *
  1306. \*****************************************************************************/
  1307. static int __init sdhci_drv_init(void)
  1308. {
  1309. printk(KERN_INFO DRIVER_NAME
  1310. ": Secure Digital Host Controller Interface driver\n");
  1311. printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
  1312. spin_lock_init(&index_lock);
  1313. return pci_register_driver(&sdhci_driver);
  1314. }
  1315. static void __exit sdhci_drv_exit(void)
  1316. {
  1317. DBG("Exiting\n");
  1318. pci_unregister_driver(&sdhci_driver);
  1319. }
  1320. module_init(sdhci_drv_init);
  1321. module_exit(sdhci_drv_exit);
  1322. module_param(debug_quirks, uint, 0444);
  1323. MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
  1324. MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver");
  1325. MODULE_LICENSE("GPL");
  1326. MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");