sdhci.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575
  1. /*
  2. * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
  3. *
  4. * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or (at
  9. * your option) any later version.
  10. */
  11. #include <linux/delay.h>
  12. #include <linux/highmem.h>
  13. #include <linux/pci.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/mmc/host.h>
  16. #include <asm/scatterlist.h>
  17. #include "sdhci.h"
  18. #define DRIVER_NAME "sdhci"
  19. #define DBG(f, x...) \
  20. pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
  21. static unsigned int debug_nodma = 0;
  22. static unsigned int debug_forcedma = 0;
  23. static unsigned int debug_quirks = 0;
  24. #define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
  25. #define SDHCI_QUIRK_FORCE_DMA (1<<1)
  26. /* Controller doesn't like some resets when there is no card inserted. */
  27. #define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
  28. #define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
  29. #define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
/*
 * PCI IDs this driver binds to, most-specific entries first.
 * .driver_data carries the SDHCI_QUIRK_* flags to apply to that chip.
 */
static const struct pci_device_id pci_ids[] __devinitdata = {
	{
		/* IBM-branded Ricoh R5C822: needs a clock kick before reset. */
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= PCI_DEVICE_ID_RICOH_R5C822,
		.subvendor	= PCI_VENDOR_ID_IBM,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_CLOCK_BEFORE_RESET |
				  SDHCI_QUIRK_FORCE_DMA,
	},

	{
		/* Generic Ricoh R5C822: resets misbehave with no card inserted. */
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= PCI_DEVICE_ID_RICOH_R5C822,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_FORCE_DMA |
				  SDHCI_QUIRK_NO_CARD_NO_RESET,
	},

	{
		/* TI XX21/XX11 SD controller. */
		.vendor		= PCI_VENDOR_ID_TI,
		.device		= PCI_DEVICE_ID_TI_XX21_XX11_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_FORCE_DMA,
	},

	{
		/* ENE CB712: dislikes the clear-then-set power sequence. */
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB712_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB712_SD_2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE,
	},

	{
		/* ENE CB714: also needs CMD/DATA resets on every set_ios. */
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB714_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
				  SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB714_SD_2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
				  SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
	},

	{	/* Generic SD host controller */
		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
	},

	{ /* end: all zeroes */ },
};
  89. MODULE_DEVICE_TABLE(pci, pci_ids);
  90. static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
  91. static void sdhci_finish_data(struct sdhci_host *);
  92. static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
  93. static void sdhci_finish_command(struct sdhci_host *);
/*
 * Dump the controller's register file to the kernel log at KERN_DEBUG
 * level. Called from error paths to aid post-mortem diagnosis.
 */
static void sdhci_dumpregs(struct sdhci_host *host)
{
	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");

	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
		readl(host->ioaddr + SDHCI_DMA_ADDRESS),
		readw(host->ioaddr + SDHCI_HOST_VERSION));
	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		readw(host->ioaddr + SDHCI_BLOCK_SIZE),
		readw(host->ioaddr + SDHCI_BLOCK_COUNT));
	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		readl(host->ioaddr + SDHCI_ARGUMENT),
		readw(host->ioaddr + SDHCI_TRANSFER_MODE));
	printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
		readl(host->ioaddr + SDHCI_PRESENT_STATE),
		readb(host->ioaddr + SDHCI_HOST_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
		readb(host->ioaddr + SDHCI_POWER_CONTROL),
		readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL));
	/* NOTE(review): "WALK_UP" is the constant's spelling as declared in
	 * sdhci.h; presumably the wake-up control register. */
	printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
		readb(host->ioaddr + SDHCI_WALK_UP_CONTROL),
		readw(host->ioaddr + SDHCI_CLOCK_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
		readb(host->ioaddr + SDHCI_TIMEOUT_CONTROL),
		readl(host->ioaddr + SDHCI_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		readl(host->ioaddr + SDHCI_INT_ENABLE),
		readl(host->ioaddr + SDHCI_SIGNAL_ENABLE));
	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		readw(host->ioaddr + SDHCI_ACMD12_ERR),
		readw(host->ioaddr + SDHCI_SLOT_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
		readl(host->ioaddr + SDHCI_CAPABILITIES),
		readl(host->ioaddr + SDHCI_MAX_CURRENT));

	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}
  129. /*****************************************************************************\
  130. * *
  131. * Low level functions *
  132. * *
  133. \*****************************************************************************/
  134. static void sdhci_reset(struct sdhci_host *host, u8 mask)
  135. {
  136. unsigned long timeout;
  137. if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
  138. if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
  139. SDHCI_CARD_PRESENT))
  140. return;
  141. }
  142. writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);
  143. if (mask & SDHCI_RESET_ALL)
  144. host->clock = 0;
  145. /* Wait max 100 ms */
  146. timeout = 100;
  147. /* hw clears the bit when it's done */
  148. while (readb(host->ioaddr + SDHCI_SOFTWARE_RESET) & mask) {
  149. if (timeout == 0) {
  150. printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
  151. mmc_hostname(host->mmc), (int)mask);
  152. sdhci_dumpregs(host);
  153. return;
  154. }
  155. timeout--;
  156. mdelay(1);
  157. }
  158. }
  159. static void sdhci_init(struct sdhci_host *host)
  160. {
  161. u32 intmask;
  162. sdhci_reset(host, SDHCI_RESET_ALL);
  163. intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
  164. SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
  165. SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
  166. SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
  167. SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
  168. SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;
  169. writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
  170. writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
  171. }
  172. static void sdhci_activate_led(struct sdhci_host *host)
  173. {
  174. u8 ctrl;
  175. ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
  176. ctrl |= SDHCI_CTRL_LED;
  177. writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
  178. }
  179. static void sdhci_deactivate_led(struct sdhci_host *host)
  180. {
  181. u8 ctrl;
  182. ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
  183. ctrl &= ~SDHCI_CTRL_LED;
  184. writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
  185. }
  186. /*****************************************************************************\
  187. * *
  188. * Core functions *
  189. * *
  190. \*****************************************************************************/
/*
 * Return a kernel virtual address for the current scatter-gather entry.
 * NOTE(review): page_address() only works for pages with a permanent
 * kernel mapping — presumably the sg list is guaranteed lowmem; confirm.
 */
static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
{
	return page_address(host->cur_sg->page) + host->cur_sg->offset;
}
  195. static inline int sdhci_next_sg(struct sdhci_host* host)
  196. {
  197. /*
  198. * Skip to next SG entry.
  199. */
  200. host->cur_sg++;
  201. host->num_sg--;
  202. /*
  203. * Any entries left?
  204. */
  205. if (host->num_sg > 0) {
  206. host->offset = 0;
  207. host->remain = host->cur_sg->length;
  208. }
  209. return host->num_sg;
  210. }
/*
 * Pull one block out of the controller's data port by PIO.
 * Each 32-bit read from SDHCI_BUFFER is unpacked byte-by-byte (low
 * byte first) into the current scatter-gather buffer, crossing sg
 * entry boundaries as needed.
 */
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	int blksize, chunk_remain;
	u32 data;
	char *buffer;
	int size;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk_remain = 0;	/* bytes left in the current 32-bit word */
	data = 0;

	buffer = sdhci_sg_to_buffer(host) + host->offset;

	while (blksize) {
		/* Refill the word from the FIFO when it runs dry. */
		if (chunk_remain == 0) {
			data = readl(host->ioaddr + SDHCI_BUFFER);
			chunk_remain = min(blksize, 4);
		}

		/* Copy no more than both the sg entry and the word hold. */
		size = min(host->remain, chunk_remain);

		chunk_remain -= size;
		blksize -= size;
		host->offset += size;
		host->remain -= size;

		while (size) {
			*buffer = data & 0xFF;
			buffer++;
			data >>= 8;
			size--;
		}

		/* Current sg entry exhausted: move to the next one. */
		if (host->remain == 0) {
			if (sdhci_next_sg(host) == 0) {
				/* sg list may only end on a block boundary */
				BUG_ON(blksize != 0);
				return;
			}
			buffer = sdhci_sg_to_buffer(host);
		}
	}
}
  247. static void sdhci_write_block_pio(struct sdhci_host *host)
  248. {
  249. int blksize, chunk_remain;
  250. u32 data;
  251. char *buffer;
  252. int bytes, size;
  253. DBG("PIO writing\n");
  254. blksize = host->data->blksz;
  255. chunk_remain = 4;
  256. data = 0;
  257. bytes = 0;
  258. buffer = sdhci_sg_to_buffer(host) + host->offset;
  259. while (blksize) {
  260. size = min(host->remain, chunk_remain);
  261. chunk_remain -= size;
  262. blksize -= size;
  263. host->offset += size;
  264. host->remain -= size;
  265. while (size) {
  266. data >>= 8;
  267. data |= (u32)*buffer << 24;
  268. buffer++;
  269. size--;
  270. }
  271. if (chunk_remain == 0) {
  272. writel(data, host->ioaddr + SDHCI_BUFFER);
  273. chunk_remain = min(blksize, 4);
  274. }
  275. if (host->remain == 0) {
  276. if (sdhci_next_sg(host) == 0) {
  277. BUG_ON(blksize != 0);
  278. return;
  279. }
  280. buffer = sdhci_sg_to_buffer(host);
  281. }
  282. }
  283. }
  284. static void sdhci_transfer_pio(struct sdhci_host *host)
  285. {
  286. u32 mask;
  287. BUG_ON(!host->data);
  288. if (host->num_sg == 0)
  289. return;
  290. if (host->data->flags & MMC_DATA_READ)
  291. mask = SDHCI_DATA_AVAILABLE;
  292. else
  293. mask = SDHCI_SPACE_AVAILABLE;
  294. while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
  295. if (host->data->flags & MMC_DATA_READ)
  296. sdhci_read_block_pio(host);
  297. else
  298. sdhci_write_block_pio(host);
  299. if (host->num_sg == 0)
  300. break;
  301. }
  302. DBG("PIO transfer complete.\n");
  303. }
/*
 * Program the controller for an upcoming data transfer: data timeout,
 * DMA address or PIO scatter-gather cursor, block size and block count.
 * A NULL data pointer means the command carries no data phase.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	unsigned target_timeout, current_timeout;

	WARN_ON(host->data);

	if (data == NULL)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;

	/* timeout in us */
	/* NOTE(review): divides by host->clock — assumes the clock was set
	 * non-zero before a data command is issued; confirm callers. */
	target_timeout = data->timeout_ns / 1000 +
		data->timeout_clks / host->clock;

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 * =>
	 * (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		printk(KERN_WARNING "%s: Too large timeout requested!\n",
			mmc_hostname(host->mmc));
		count = 0xE;	/* clamp to the largest encodable timeout */
	}

	writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);

	if (host->flags & SDHCI_USE_DMA) {
		int count;

		/* Map the sg list; only a single DMA entry is supported. */
		count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len,
			(data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
		BUG_ON(count != 1);

		writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS);
	} else {
		/* PIO: initialize the software cursor into the sg list. */
		host->cur_sg = data->sg;
		host->num_sg = data->sg_len;

		host->offset = 0;
		host->remain = host->cur_sg->length;
	}

	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
	writew(SDHCI_MAKE_BLKSZ(7, data->blksz),
		host->ioaddr + SDHCI_BLOCK_SIZE);
	writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
}
  361. static void sdhci_set_transfer_mode(struct sdhci_host *host,
  362. struct mmc_data *data)
  363. {
  364. u16 mode;
  365. if (data == NULL)
  366. return;
  367. WARN_ON(!host->data);
  368. mode = SDHCI_TRNS_BLK_CNT_EN;
  369. if (data->blocks > 1)
  370. mode |= SDHCI_TRNS_MULTI;
  371. if (data->flags & MMC_DATA_READ)
  372. mode |= SDHCI_TRNS_READ;
  373. if (host->flags & SDHCI_USE_DMA)
  374. mode |= SDHCI_TRNS_DMA;
  375. writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
  376. }
/*
 * Tear down a completed (or failed) data transfer: unmap DMA, compute
 * bytes_xfered from the hardware block counter, then either issue the
 * stop command or schedule request completion.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;
	u16 blocks;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_USE_DMA) {
		pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
			(data->flags & MMC_DATA_READ)?PCI_DMA_FROMDEVICE:PCI_DMA_TODEVICE);
	}

	/*
	 * Controller doesn't count down when in single block mode.
	 */
	if (data->blocks == 1)
		blocks = (data->error == 0) ? 0 : 1;
	else
		blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT);
	data->bytes_xfered = data->blksz * (data->blocks - blocks);

	/* Completion with blocks outstanding and no error is bogus. */
	if (!data->error && blocks) {
		printk(KERN_ERR "%s: Controller signalled completion even "
			"though there were blocks left.\n",
			mmc_hostname(host->mmc));
		data->error = -EIO;
	}

	if (data->stop) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
/*
 * Issue a command to the controller: wait for the inhibit bits to
 * clear, arm the 10 s software watchdog, program the data phase and
 * argument, then write the command register (which starts execution).
 * Errors are reported through cmd->error plus the finish tasklet.
 */
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Software watchdog in case the hardware interrupt never arrives. */
	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd->data);

	writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	/* The hardware cannot signal busy on a 136-bit response. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/* Translate MMC response flags into the SDHCI response format. */
	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (cmd->data)
		flags |= SDHCI_CMD_DATA;

	/* Writing the command register kicks off the transaction. */
	writew(SDHCI_MAKE_CMD(cmd->opcode, flags),
		host->ioaddr + SDHCI_COMMAND);
}
/*
 * Collect the response for the command that just completed, flush any
 * data phase that finished early, and schedule request completion if
 * there is no data phase still pending.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			/* Rebuild the 128-bit response from the response
			 * registers, read high word (3-i) first; each word
			 * borrows its low byte from the neighbour below. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = readl(host->ioaddr +
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						readb(host->ioaddr +
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = readl(host->ioaddr + SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Data finished before the command response arrived: finish it now. */
	if (host->data && host->data_early)
		sdhci_finish_data(host);

	if (!host->cmd->data)
		tasklet_schedule(&host->finish_tasklet);

	host->cmd = NULL;
}
/*
 * Set the SD bus clock to at most 'clock' Hz (0 turns it off).
 * Picks the smallest power-of-two divider of host->max_clk that does
 * not exceed the request, waits up to 10 ms for the internal clock to
 * stabilise, then gates it through to the card.
 */
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;

	if (clock == host->clock)
		return;

	/* Always stop the clock before reprogramming the divider. */
	writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	for (div = 1;div < 256;div *= 2) {
		if ((host->max_clk / div) <= clock)
			break;
	}
	/* The register encodes the divider as div/2 (0 means divide by 1). */
	div >>= 1;

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

	/* Wait max 10 ms */
	timeout = 10;
	while (!((clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Internal clock stable: enable the clock towards the card. */
	clk |= SDHCI_CLOCK_CARD_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}
/*
 * Set the bus power. 'power' is an MMC_VDD_* bit number; the special
 * value (unsigned short)-1 powers the bus off entirely.
 */
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr;

	if (host->power == power)
		return;

	if (power == (unsigned short)-1) {
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
		goto out;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);

	pwr = SDHCI_POWER_ON;

	/* Map the requested voltage range to the register's three levels. */
	switch (1 << power) {
	case MMC_VDD_165_195:
		pwr |= SDHCI_POWER_180;
		break;
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
		pwr |= SDHCI_POWER_300;
		break;
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
		pwr |= SDHCI_POWER_330;
		break;
	default:
		/* The MMC core should never request an unsupported voltage. */
		BUG();
	}

	writeb(pwr, host->ioaddr + SDHCI_POWER_CONTROL);

out:
	host->power = power;
}
  568. /*****************************************************************************\
  569. * *
  570. * MMC callbacks *
  571. * *
  572. \*****************************************************************************/
/*
 * mmc_host_ops.request callback: start processing an MMC request.
 * Fails the request immediately with -ENOMEDIUM when no card is
 * present; otherwise kicks off the first command. Completion is
 * asynchronous via the finish tasklet.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

	sdhci_activate_led(host);

	host->mrq = mrq;

	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else
		sdhci_send_command(host, mrq->cmd);

	/* Order MMIO writes before dropping the spinlock. */
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * mmc_host_ops.set_ios callback: apply clock, power, bus width and
 * timing settings requested by the MMC core.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE);
		sdhci_init(host);
	}

	sdhci_set_clock(host, ios->clock);

	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	/* Update bus width and high-speed bits in host control. */
	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers misbehave badly on some ios operations,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
  630. static int sdhci_get_ro(struct mmc_host *mmc)
  631. {
  632. struct sdhci_host *host;
  633. unsigned long flags;
  634. int present;
  635. host = mmc_priv(mmc);
  636. spin_lock_irqsave(&host->lock, flags);
  637. present = readl(host->ioaddr + SDHCI_PRESENT_STATE);
  638. spin_unlock_irqrestore(&host->lock, flags);
  639. return !(present & SDHCI_WRITE_PROTECT);
  640. }
/* Callbacks registered with the MMC core for each host instance. */
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_ro		= sdhci_get_ro,
};
  646. /*****************************************************************************\
  647. * *
  648. * Tasklets *
  649. * *
  650. \*****************************************************************************/
/*
 * Card insert/remove tasklet. If the card disappeared mid-request,
 * reset the CMD/DATA state machines and fail the request with
 * -ENOMEDIUM, then let the MMC core rescan the slot (debounced 500 ms).
 */
static void sdhci_tasklet_card(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
}
/*
 * Request-completion tasklet: cancel the watchdog timer, reset the
 * controller state machines if anything in the request errored, clear
 * the per-request bookkeeping and hand the request back to the core.
 *
 * NOTE(review): host->mrq is dereferenced without a NULL check —
 * presumably the tasklet is only ever scheduled while a request is
 * outstanding; confirm against all tasklet_schedule() call sites.
 */
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (mrq->cmd->error ||
		(mrq->data && (mrq->data->error ||
		(mrq->data->stop && mrq->data->stop->error)))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
			unsigned int clock;

			/* This is to force an update */
			clock = host->clock;
			host->clock = 0;
			sdhci_set_clock(host, clock);
		}

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	sdhci_deactivate_led(host);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
}
/*
 * Software watchdog (armed in sdhci_send_command for 10 s): fires when
 * the hardware never raised a completion interrupt. Fails the pending
 * data phase or command with -ETIMEDOUT and pushes completion forward.
 */
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
  733. /*****************************************************************************\
  734. * *
  735. * Interrupt handling *
  736. * *
  737. \*****************************************************************************/
/*
 * Handle the command-related interrupt bits (called from sdhci_irq
 * with the host lock held). Maps hardware error bits to errno codes
 * and either completes the command or schedules request teardown.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	/* Spurious command interrupt: log and ignore. */
	if (!host->cmd) {
		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error)
		tasklet_schedule(&host->finish_tasklet);
	else if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
/*
 * Handle the data-related interrupt bits (called from sdhci_irq with
 * the host lock held): data errors, PIO buffer-ready events, DMA
 * boundary restarts and end-of-transfer.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * A data end interrupt is sent together with the response
		 * for the stop command.
		 */
		if (intmask & SDHCI_INT_DATA_END)
			return;

		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
		host->data->error = -EILSEQ;

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		/* PIO: move data while the buffer-ready bits are set. */
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 */
		if (intmask & SDHCI_INT_DMA_END)
			writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS),
				host->ioaddr + SDHCI_DMA_ADDRESS);

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
/*
 * Top-level interrupt handler.  Reads SDHCI_INT_STATUS, acknowledges
 * each group of bits by writing them back to the status register, and
 * dispatches to the command/data sub-handlers.  Runs with host->lock
 * held for the whole decode.
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result;
	struct sdhci_host* host = dev_id;
	u32 intmask;

	spin_lock(&host->lock);

	intmask = readl(host->ioaddr + SDHCI_INT_STATUS);

	/*
	 * 0 means the interrupt was not ours; all-ones typically reads
	 * back when the device has dropped off the bus — treat both as
	 * spurious and claim nothing.
	 */
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	DBG("*** %s got interrupt: 0x%08x\n", host->slot_descr, intmask);

	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		/* Ack only the presence bits, then let the tasklet rescan. */
		writel(intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
			host->ioaddr + SDHCI_INT_STATUS);
		tasklet_schedule(&host->card_tasklet);
	}

	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);

	/* Ack command bits before handling them, so new events latch. */
	if (intmask & SDHCI_INT_CMD_MASK) {
		writel(intmask & SDHCI_INT_CMD_MASK,
			host->ioaddr + SDHCI_INT_STATUS);
		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
	}

	if (intmask & SDHCI_INT_DATA_MASK) {
		writel(intmask & SDHCI_INT_DATA_MASK,
			host->ioaddr + SDHCI_INT_STATUS);
		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
	}

	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

	/* The generic error summary bit carries no extra information. */
	intmask &= ~SDHCI_INT_ERROR;

	if (intmask & SDHCI_INT_BUS_POWER) {
		printk(KERN_ERR "%s: Card is consuming too much power!\n",
			mmc_hostname(host->mmc));
		writel(SDHCI_INT_BUS_POWER, host->ioaddr + SDHCI_INT_STATUS);
	}

	intmask &= ~SDHCI_INT_BUS_POWER;

	if (intmask) {
		/* Whatever is left is unexpected; ack it so it can't storm. */
		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);

		writel(intmask, host->ioaddr + SDHCI_INT_STATUS);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&host->lock);

	return result;
}
  853. /*****************************************************************************\
  854. * *
  855. * Suspend/resume *
  856. * *
  857. \*****************************************************************************/
  858. #ifdef CONFIG_PM
  859. static int sdhci_suspend (struct pci_dev *pdev, pm_message_t state)
  860. {
  861. struct sdhci_chip *chip;
  862. int i, ret;
  863. chip = pci_get_drvdata(pdev);
  864. if (!chip)
  865. return 0;
  866. DBG("Suspending...\n");
  867. for (i = 0;i < chip->num_slots;i++) {
  868. if (!chip->hosts[i])
  869. continue;
  870. ret = mmc_suspend_host(chip->hosts[i]->mmc, state);
  871. if (ret) {
  872. for (i--;i >= 0;i--)
  873. mmc_resume_host(chip->hosts[i]->mmc);
  874. return ret;
  875. }
  876. }
  877. pci_save_state(pdev);
  878. pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
  879. for (i = 0;i < chip->num_slots;i++) {
  880. if (!chip->hosts[i])
  881. continue;
  882. free_irq(chip->hosts[i]->irq, chip->hosts[i]);
  883. }
  884. pci_disable_device(pdev);
  885. pci_set_power_state(pdev, pci_choose_state(pdev, state));
  886. return 0;
  887. }
/*
 * Power-management resume callback: bring the PCI function back to D0,
 * re-request each slot's IRQ, reinitialize the controller and hand the
 * slot back to the MMC core.
 *
 * NOTE(review): the error returns inside the loop do not release IRQs
 * already requested for earlier slots, nor undo their mmc_resume_host()
 * — confirm whether partial-resume cleanup is required here.
 */
static int sdhci_resume (struct pci_dev *pdev)
{
	struct sdhci_chip *chip;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	DBG("Resuming...\n");

	/* Restore PCI state before touching controller registers. */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	for (i = 0;i < chip->num_slots;i++) {
		if (!chip->hosts[i])
			continue;
		if (chip->hosts[i]->flags & SDHCI_USE_DMA)
			pci_set_master(pdev);
		/* IRQs were freed in sdhci_suspend(); request them again. */
		ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
			IRQF_SHARED, chip->hosts[i]->slot_descr,
			chip->hosts[i]);
		if (ret)
			return ret;
		sdhci_init(chip->hosts[i]);
		mmiowb();
		ret = mmc_resume_host(chip->hosts[i]->mmc);
		if (ret)
			return ret;
	}

	return 0;
}
  919. #else /* CONFIG_PM */
  920. #define sdhci_suspend NULL
  921. #define sdhci_resume NULL
  922. #endif /* CONFIG_PM */
  923. /*****************************************************************************\
  924. * *
  925. * Device probing/removal *
  926. * *
  927. \*****************************************************************************/
/*
 * Probe one slot of an SDHCI PCI function: map its register BAR, read
 * the capabilities register, derive clocking/DMA/voltage parameters,
 * and register the slot with the MMC core.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto unwind chain at the end.
 */
static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
{
	int ret;
	unsigned int version;
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	u8 first_bar;
	unsigned int caps;

	chip = pci_get_drvdata(pdev);
	BUG_ON(!chip);

	/* The first BAR carrying slot registers is encoded in config space. */
	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return ret;

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) {
		printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n");
		return -ENODEV;
	}

	/* Each slot's register block is expected to be 256 bytes. */
	if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
		printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
			"You may experience problems.\n");
	}

	/* Only the standard SDHCI programming interface is supported. */
	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n");
		return -ENODEV;
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");
		return -ENODEV;
	}

	/* sdhci_host lives in the mmc_host's private area. */
	mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->chip = chip;
	chip->hosts[slot] = host;

	host->bar = first_bar + slot;

	host->addr = pci_resource_start(pdev, host->bar);
	host->irq = pdev->irq;

	DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq);

	snprintf(host->slot_descr, 20, "sdhci:slot%d", slot);

	ret = pci_request_region(pdev, host->bar, host->slot_descr);
	if (ret)
		goto free;

	host->ioaddr = ioremap_nocache(host->addr,
		pci_resource_len(pdev, host->bar));
	if (!host->ioaddr) {
		ret = -ENOMEM;
		goto release;
	}

	/* Put the controller into a known state before reading caps. */
	sdhci_reset(host, SDHCI_RESET_ALL);

	version = readw(host->ioaddr + SDHCI_HOST_VERSION);
	version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
	if (version != 0) {
		/* Only spec version value 0 is known to this driver. */
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", host->slot_descr,
			version);
	}

	caps = readl(host->ioaddr + SDHCI_CAPABILITIES);

	/*
	 * DMA selection: module parameters override quirks, which in
	 * turn override what the PCI class/caps registers advertise.
	 */
	if (debug_nodma)
		DBG("DMA forced off\n");
	else if (debug_forcedma) {
		DBG("DMA forced on\n");
		host->flags |= SDHCI_USE_DMA;
	} else if (chip->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_DMA;
	else if ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA)
		DBG("Controller doesn't have DMA interface\n");
	else if (!(caps & SDHCI_CAN_DO_DMA))
		DBG("Controller doesn't have DMA capability\n");
	else
		host->flags |= SDHCI_USE_DMA;

	if (host->flags & SDHCI_USE_DMA) {
		/* Controller can only address 32 bits. */
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
			printk(KERN_WARNING "%s: No suitable DMA available. "
				"Falling back to PIO.\n", host->slot_descr);
			host->flags &= ~SDHCI_USE_DMA;
		}
	}

	if (host->flags & SDHCI_USE_DMA)
		pci_set_master(pdev);
	else /* XXX: Hack to get MMC layer to avoid highmem */
		pdev->dma_mask = 0;

	/* Base clock is reported in MHz in the caps register. */
	host->max_clk =
		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	if (host->max_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify base clock "
			"frequency.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	host->max_clk *= 1000000;

	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
			"frequency.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	/* Unit bit set means the timeout clock is given in MHz, not kHz. */
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	mmc->f_min = host->max_clk / 256;	/* smallest divisor is 256 */
	mmc->f_max = host->max_clk;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	/* Advertise only the voltages the capabilities register claims. */
	mmc->ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_165_195;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"support voltages.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Hardware cannot do scatter lists.
	 */
	if (host->flags & SDHCI_USE_DMA)
		mmc->max_hw_segs = 1;
	else
		mmc->max_hw_segs = 16;
	mmc->max_phys_segs = 16;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
	if (mmc->max_blk_size >= 3) {
		/* Encoded values 0..2 map to 512/1024/2048 bytes. */
		printk(KERN_WARNING "%s: Invalid maximum block size, assuming 512\n",
			host->slot_descr);
		mmc->max_blk_size = 512;
	} else
		mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	/* Watchdog for requests whose completion interrupt never arrives. */
	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		host->slot_descr, host);
	if (ret)
		goto untasklet;

	sdhci_init(host);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", mmc_hostname(mmc),
		host->addr, host->irq,
		(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");

	return 0;

	/* Error unwind: release in reverse order of acquisition. */
untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);
unmap:
	iounmap(host->ioaddr);
release:
	pci_release_region(pdev, host->bar);
free:
	mmc_free_host(mmc);

	return ret;
}
/*
 * Tear down one slot: detach it from the MMC core first so no new
 * requests arrive, then quiesce the hardware and release resources in
 * reverse order of sdhci_probe_slot().
 */
static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
{
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	chip = pci_get_drvdata(pdev);
	host = chip->hosts[slot];
	mmc = host->mmc;

	chip->hosts[slot] = NULL;

	mmc_remove_host(mmc);

	/* Stop the controller before freeing the IRQ and timers. */
	sdhci_reset(host, SDHCI_RESET_ALL);

	free_irq(host->irq, host);

	/* Safe now: with the IRQ gone nothing can re-arm the watchdog. */
	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	iounmap(host->ioaddr);

	pci_release_region(pdev, host->bar);

	mmc_free_host(mmc);
}
  1144. static int __devinit sdhci_probe(struct pci_dev *pdev,
  1145. const struct pci_device_id *ent)
  1146. {
  1147. int ret, i;
  1148. u8 slots, rev;
  1149. struct sdhci_chip *chip;
  1150. BUG_ON(pdev == NULL);
  1151. BUG_ON(ent == NULL);
  1152. pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
  1153. printk(KERN_INFO DRIVER_NAME
  1154. ": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
  1155. pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
  1156. (int)rev);
  1157. ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
  1158. if (ret)
  1159. return ret;
  1160. slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
  1161. DBG("found %d slot(s)\n", slots);
  1162. if (slots == 0)
  1163. return -ENODEV;
  1164. ret = pci_enable_device(pdev);
  1165. if (ret)
  1166. return ret;
  1167. chip = kzalloc(sizeof(struct sdhci_chip) +
  1168. sizeof(struct sdhci_host*) * slots, GFP_KERNEL);
  1169. if (!chip) {
  1170. ret = -ENOMEM;
  1171. goto err;
  1172. }
  1173. chip->pdev = pdev;
  1174. chip->quirks = ent->driver_data;
  1175. if (debug_quirks)
  1176. chip->quirks = debug_quirks;
  1177. chip->num_slots = slots;
  1178. pci_set_drvdata(pdev, chip);
  1179. for (i = 0;i < slots;i++) {
  1180. ret = sdhci_probe_slot(pdev, i);
  1181. if (ret) {
  1182. for (i--;i >= 0;i--)
  1183. sdhci_remove_slot(pdev, i);
  1184. goto free;
  1185. }
  1186. }
  1187. return 0;
  1188. free:
  1189. pci_set_drvdata(pdev, NULL);
  1190. kfree(chip);
  1191. err:
  1192. pci_disable_device(pdev);
  1193. return ret;
  1194. }
  1195. static void __devexit sdhci_remove(struct pci_dev *pdev)
  1196. {
  1197. int i;
  1198. struct sdhci_chip *chip;
  1199. chip = pci_get_drvdata(pdev);
  1200. if (chip) {
  1201. for (i = 0;i < chip->num_slots;i++)
  1202. sdhci_remove_slot(pdev, i);
  1203. pci_set_drvdata(pdev, NULL);
  1204. kfree(chip);
  1205. }
  1206. pci_disable_device(pdev);
  1207. }
/*
 * PCI driver glue: one sdhci_chip per matched PCI function, one
 * sdhci_host per slot (see sdhci_probe/sdhci_probe_slot).
 */
static struct pci_driver sdhci_driver = {
	.name = DRIVER_NAME,
	.id_table = pci_ids,
	.probe = sdhci_probe,
	.remove = __devexit_p(sdhci_remove),
	.suspend = sdhci_suspend,
	.resume = sdhci_resume,
};
  1216. /*****************************************************************************\
  1217. * *
  1218. * Driver init/exit *
  1219. * *
  1220. \*****************************************************************************/
/*
 * Module entry point: announce the driver and register with the PCI
 * core; actual device setup happens via sdhci_driver's callbacks.
 */
static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return pci_register_driver(&sdhci_driver);
}
/*
 * Module exit point: unregistering the PCI driver triggers
 * sdhci_remove() for every bound device.
 */
static void __exit sdhci_drv_exit(void)
{
	DBG("Exiting\n");

	pci_unregister_driver(&sdhci_driver);
}
  1233. module_init(sdhci_drv_init);
  1234. module_exit(sdhci_drv_exit);
  1235. module_param(debug_nodma, uint, 0444);
  1236. module_param(debug_forcedma, uint, 0444);
  1237. module_param(debug_quirks, uint, 0444);
  1238. MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
  1239. MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver");
  1240. MODULE_LICENSE("GPL");
  1241. MODULE_PARM_DESC(debug_nodma, "Forcefully disable DMA transfers. (default 0)");
  1242. MODULE_PARM_DESC(debug_forcedma, "Forcefully enable DMA transfers. (default 0)");
  1243. MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");