
/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
                                     unsigned long delay)
{
        return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
        flush_workqueue(workqueue);
}

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        int err = cmd->error;

        if (err && cmd->retries && mmc_host_is_spi(host)) {
                if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
                        cmd->retries = 0;
        }

        if (err && cmd->retries) {
                pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
                         mmc_hostname(host), cmd->opcode, err);

                cmd->retries--;
                cmd->error = 0;
                host->ops->request(host, mrq);
        } else {
                led_trigger_event(host->led, LED_OFF);

                pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
                         mmc_hostname(host), cmd->opcode, err,
                         cmd->resp[0], cmd->resp[1],
                         cmd->resp[2], cmd->resp[3]);

                if (mrq->data) {
                        pr_debug("%s: %d bytes transferred: %d\n",
                                 mmc_hostname(host),
                                 mrq->data->bytes_xfered, mrq->data->error);
                }

                if (mrq->stop) {
                        pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
                                 mmc_hostname(host), mrq->stop->opcode,
                                 mrq->stop->error,
                                 mrq->stop->resp[0], mrq->stop->resp[1],
                                 mrq->stop->resp[2], mrq->stop->resp[3]);
                }

                if (mrq->done)
                        mrq->done(mrq);
        }
}

EXPORT_SYMBOL(mmc_request_done);

static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned int i, sz;
#endif

        pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
                 mmc_hostname(host), mrq->cmd->opcode,
                 mrq->cmd->arg, mrq->cmd->flags);

        if (mrq->data) {
                pr_debug("%s: blksz %d blocks %d flags %08x "
                         "tsac %d ms nsac %d\n",
                         mmc_hostname(host), mrq->data->blksz,
                         mrq->data->blocks, mrq->data->flags,
                         mrq->data->timeout_ns / 1000000,
                         mrq->data->timeout_clks);
        }

        if (mrq->stop) {
                pr_debug("%s: CMD%u arg %08x flags %08x\n",
                         mmc_hostname(host), mrq->stop->opcode,
                         mrq->stop->arg, mrq->stop->flags);
        }

        WARN_ON(!host->claimed);

        led_trigger_event(host->led, LED_FULL);

        mrq->cmd->error = 0;
        mrq->cmd->mrq = mrq;
        if (mrq->data) {
                BUG_ON(mrq->data->blksz > host->max_blk_size);
                BUG_ON(mrq->data->blocks > host->max_blk_count);
                BUG_ON(mrq->data->blocks * mrq->data->blksz >
                       host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
                sz = 0;
                for (i = 0; i < mrq->data->sg_len; i++)
                        sz += mrq->data->sg[i].length;
                BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

                mrq->cmd->data = mrq->data;
                mrq->data->error = 0;
                mrq->data->mrq = mrq;
                if (mrq->stop) {
                        mrq->data->stop = mrq->stop;
                        mrq->stop->error = 0;
                        mrq->stop->mrq = mrq;
                }
        }
        host->ops->request(host, mrq);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
        complete(mrq->done_data);
}

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
        DECLARE_COMPLETION_ONSTACK(complete);

        mrq->done_data = &complete;
        mrq->done = mmc_wait_done;

        mmc_start_request(host, mrq);

        wait_for_completion(&complete);
}

EXPORT_SYMBOL(mmc_wait_for_req);
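/*
 * A minimal usage sketch for mmc_wait_for_req() (hypothetical caller code;
 * "card", "buffer" and "block_addr" are assumed to exist): read one
 * 512-byte block synchronously while holding the host claim. The core
 * links cmd/data into the request in mmc_start_request(), so the caller
 * only fills in the individual structures.
 *
 *        struct mmc_request mrq;
 *        struct mmc_command cmd;
 *        struct mmc_data data;
 *        struct scatterlist sg;
 *
 *        memset(&mrq, 0, sizeof(mrq));
 *        memset(&cmd, 0, sizeof(cmd));
 *        memset(&data, 0, sizeof(data));
 *
 *        sg_init_one(&sg, buffer, 512);
 *
 *        cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *        cmd.arg = block_addr;
 *        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *        data.blksz = 512;
 *        data.blocks = 1;
 *        data.flags = MMC_DATA_READ;
 *        data.sg = &sg;
 *        data.sg_len = 1;
 *        mmc_set_data_timeout(&data, card);
 *
 *        mrq.cmd = &cmd;
 *        mrq.data = &data;
 *
 *        mmc_claim_host(card->host);
 *        mmc_wait_for_req(card->host, &mrq);
 *        mmc_release_host(card->host);
 *
 *        if (cmd.error || data.error)
 *                ... handle the failure ...
 */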
/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
        struct mmc_request mrq;

        WARN_ON(!host->claimed);

        memset(&mrq, 0, sizeof(struct mmc_request));

        memset(cmd->resp, 0, sizeof(cmd->resp));
        cmd->retries = retries;

        mrq.cmd = cmd;
        cmd->data = NULL;

        mmc_wait_for_req(host, &mrq);

        return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
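/*
 * Usage sketch for mmc_wait_for_cmd() (hypothetical caller code): for a
 * command with no data phase the helper builds the request itself, so the
 * caller only sets up the command. For example, polling a card's status
 * with CMD13 (SEND_STATUS) might look like:
 *
 *        struct mmc_command cmd;
 *        u32 status;
 *        int err;
 *
 *        memset(&cmd, 0, sizeof(cmd));
 *        cmd.opcode = MMC_SEND_STATUS;
 *        cmd.arg = card->rca << 16;
 *        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *
 *        mmc_claim_host(card->host);
 *        err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *        mmc_release_host(card->host);
 *
 *        if (!err)
 *                status = cmd.resp[0];
 */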
/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
        unsigned int mult;

        /*
         * SDIO cards only define an upper 1 s limit on access.
         */
        if (mmc_card_sdio(card)) {
                data->timeout_ns = 1000000000;
                data->timeout_clks = 0;
                return;
        }

        /*
         * SD cards use a 100 multiplier rather than 10
         */
        mult = mmc_card_sd(card) ? 100 : 10;

        /*
         * Scale up the multiplier (and therefore the timeout) by
         * the r2w factor for writes.
         */
        if (data->flags & MMC_DATA_WRITE)
                mult <<= card->csd.r2w_factor;

        data->timeout_ns = card->csd.tacc_ns * mult;
        data->timeout_clks = card->csd.tacc_clks * mult;

        /*
         * SD cards also have an upper limit on the timeout.
         */
        if (mmc_card_sd(card)) {
                unsigned int timeout_us, limit_us;

                timeout_us = data->timeout_ns / 1000;
                timeout_us += data->timeout_clks * 1000 /
                        (card->host->ios.clock / 1000);

                if (data->flags & MMC_DATA_WRITE)
                        limit_us = 250000;
                else
                        limit_us = 100000;

                /*
                 * SDHC cards always use these fixed values.
                 */
                if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
                        data->timeout_ns = limit_us * 1000;
                        data->timeout_clks = 0;
                }
        }
}
EXPORT_SYMBOL(mmc_set_data_timeout);
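/*
 * Worked example with illustrative CSD values: a standard-capacity SD card
 * reporting csd.tacc_ns = 1500000 (1.5 ms), csd.tacc_clks = 0 and
 * csd.r2w_factor = 2. For a write, mult = 100 << 2 = 400, so
 * timeout_ns = 1500000 * 400 = 600000000 (600 ms) and timeout_clks = 0.
 * Since 600000 us exceeds the 250000 us write limit for SD, the timeout
 * is clamped to timeout_ns = 250000000 with timeout_clks = 0.
 */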
/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, this returns prematurely with
 * that non-zero value without acquiring the lock. Otherwise it
 * returns zero with the lock held.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;
        int stop;

        might_sleep();

        add_wait_queue(&host->wq, &wait);
        spin_lock_irqsave(&host->lock, flags);
        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                stop = abort ? atomic_read(abort) : 0;
                if (stop || !host->claimed)
                        break;
                spin_unlock_irqrestore(&host->lock, flags);
                schedule();
                spin_lock_irqsave(&host->lock, flags);
        }
        set_current_state(TASK_RUNNING);
        if (!stop)
                host->claimed = 1;
        else
                wake_up(&host->wq);
        spin_unlock_irqrestore(&host->lock, flags);
        remove_wait_queue(&host->wq, &wait);
        return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release an MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
        unsigned long flags;

        WARN_ON(!host->claimed);

        spin_lock_irqsave(&host->lock, flags);
        host->claimed = 0;
        spin_unlock_irqrestore(&host->lock, flags);

        wake_up(&host->wq);
}

EXPORT_SYMBOL(mmc_release_host);
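/*
 * Typical claim/release pattern (hypothetical caller code): the
 * mmc_claim_host() helper in the public mmc headers passes a NULL abort
 * pointer to __mmc_claim_host(), so callers normally just bracket their
 * command sequence with a claim and a release:
 *
 *        mmc_claim_host(card->host);
 *        err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *        mmc_release_host(card->host);
 */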
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
        struct mmc_ios *ios = &host->ios;

        pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
                 "width %u timing %u\n",
                 mmc_hostname(host), ios->clock, ios->bus_mode,
                 ios->power_mode, ios->chip_select, ios->vdd,
                 ios->bus_width, ios->timing);

        host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
        host->ios.chip_select = mode;
        mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
        WARN_ON(hz < host->f_min);

        if (hz > host->f_max)
                hz = host->f_max;

        host->ios.clock = hz;
        mmc_set_ios(host);
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
        host->ios.bus_mode = mode;
        mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
        host->ios.bus_width = width;
        mmc_set_ios(host);
}

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
        int bit;

        ocr &= host->ocr_avail;

        bit = ffs(ocr);
        if (bit) {
                bit -= 1;

                ocr &= 3 << bit;

                host->ios.vdd = bit;
                mmc_set_ios(host);
        } else {
                ocr = 0;
        }

        return ocr;
}
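/*
 * Worked example with illustrative values: if the host advertises
 * ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 (bits 20 and 21) and the card
 * reports ocr = 0x00ff8000, the mask leaves bits 20 and 21 set. ffs()
 * finds bit 20, "ocr &= 3 << 20" keeps only bits 20-21, and host->ios.vdd
 * is set to 20, i.e. the 3.2-3.3 V window.
 */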
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
        host->ios.timing = timing;
        mmc_set_ios(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
        int bit = fls(host->ocr_avail) - 1;

        host->ios.vdd = bit;
        if (mmc_host_is_spi(host)) {
                host->ios.chip_select = MMC_CS_HIGH;
                host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
        } else {
                host->ios.chip_select = MMC_CS_DONTCARE;
                host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
        }
        host->ios.power_mode = MMC_POWER_UP;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);

        /*
         * This delay should be sufficient to allow the power supply
         * to reach the minimum voltage.
         */
        mmc_delay(2);

        host->ios.clock = host->f_min;
        host->ios.power_mode = MMC_POWER_ON;
        mmc_set_ios(host);

        /*
         * This delay must be at least 74 clock cycles, or 1 ms, or the
         * time required to reach a stable voltage.
         */
        mmc_delay(2);
}

static void mmc_power_off(struct mmc_host *host)
{
        host->ios.clock = 0;
        host->ios.vdd = 0;
        if (!mmc_host_is_spi(host)) {
                host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
                host->ios.chip_select = MMC_CS_DONTCARE;
        }
        host->ios.power_mode = MMC_POWER_OFF;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
        BUG_ON(!host);
        BUG_ON(host->bus_refs);
        BUG_ON(!host->bus_dead);

        host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs++;
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs--;
        if ((host->bus_refs == 0) && host->bus_ops)
                __mmc_release_bus(host);
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign an mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
        unsigned long flags;

        BUG_ON(!host);
        BUG_ON(!ops);

        WARN_ON(!host->claimed);

        spin_lock_irqsave(&host->lock, flags);

        BUG_ON(host->bus_ops);
        BUG_ON(host->bus_refs);

        host->bus_ops = ops;
        host->bus_refs = 1;
        host->bus_dead = 0;

        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
        unsigned long flags;

        BUG_ON(!host);

        WARN_ON(!host->claimed);
        WARN_ON(!host->bus_ops);

        spin_lock_irqsave(&host->lock, flags);

        host->bus_dead = 1;

        spin_unlock_irqrestore(&host->lock, flags);

        mmc_power_off(host);

        mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on an MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        WARN_ON(host->removed);
        spin_unlock_irqrestore(&host->lock, flags);
#endif

        mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
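/*
 * Usage sketch (hypothetical host driver): a card-detect GPIO interrupt
 * handler typically just notifies the core and lets it re-scan after a
 * debounce delay, e.g.
 *
 *        static irqreturn_t my_cd_irq(int irq, void *dev_id)
 *        {
 *                struct mmc_host *mmc = dev_id;
 *
 *                mmc_detect_change(mmc, msecs_to_jiffies(200));
 *                return IRQ_HANDLED;
 *        }
 *
 * The 200 ms debounce value is arbitrary; the slot hardware dictates what
 * is appropriate.
 */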
void mmc_rescan(struct work_struct *work)
{
        struct mmc_host *host =
                container_of(work, struct mmc_host, detect.work);
        u32 ocr;
        int err;

        mmc_bus_get(host);

        if (host->bus_ops == NULL) {
                /*
                 * Only we can add a new handler, so it's safe to
                 * release the lock here.
                 */
                mmc_bus_put(host);

                mmc_claim_host(host);

                mmc_power_up(host);
                mmc_go_idle(host);

                mmc_send_if_cond(host, host->ocr_avail);

                /*
                 * First we search for SDIO...
                 */
                err = mmc_send_io_op_cond(host, 0, &ocr);
                if (!err) {
                        if (mmc_attach_sdio(host, ocr))
                                mmc_power_off(host);
                        return;
                }

                /*
                 * ...then normal SD...
                 */
                err = mmc_send_app_op_cond(host, 0, &ocr);
                if (!err) {
                        if (mmc_attach_sd(host, ocr))
                                mmc_power_off(host);
                        return;
                }

                /*
                 * ...and finally MMC.
                 */
                err = mmc_send_op_cond(host, 0, &ocr);
                if (!err) {
                        if (mmc_attach_mmc(host, ocr))
                                mmc_power_off(host);
                        return;
                }

                mmc_release_host(host);
                mmc_power_off(host);
        } else {
                if (host->bus_ops->detect && !host->bus_dead)
                        host->bus_ops->detect(host);

                mmc_bus_put(host);
        }
}

void mmc_start_host(struct mmc_host *host)
{
        mmc_power_off(host);
        mmc_detect_change(host, 0);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->removed = 1;
        spin_unlock_irqrestore(&host->lock, flags);
#endif

        mmc_flush_scheduled_work();

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (host->bus_ops->remove)
                        host->bus_ops->remove(host);

                mmc_claim_host(host);
                mmc_detach_bus(host);
                mmc_release_host(host);
        }
        mmc_bus_put(host);

        BUG_ON(host->card);

        mmc_power_off(host);
}

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 * @state: suspend mode (PM_SUSPEND_xxx)
 */
int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
{
        mmc_flush_scheduled_work();

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (host->bus_ops->suspend)
                        host->bus_ops->suspend(host);
                if (!host->bus_ops->resume) {
                        if (host->bus_ops->remove)
                                host->bus_ops->remove(host);

                        mmc_claim_host(host);
                        mmc_detach_bus(host);
                        mmc_release_host(host);
                }
        }
        mmc_bus_put(host);

        mmc_power_off(host);

        return 0;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                mmc_power_up(host);
                BUG_ON(!host->bus_ops->resume);
                host->bus_ops->resume(host);
        }
        mmc_bus_put(host);

        /*
         * We add a slight delay here so that resume can progress
         * in parallel.
         */
        mmc_detect_change(host, 1);

        return 0;
}

EXPORT_SYMBOL(mmc_resume_host);
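/*
 * Usage sketch (hypothetical platform host driver): the host driver's
 * power-management callbacks forward to the core, which powers the card
 * down on suspend and re-probes it on resume.
 *
 *        static int my_mmc_suspend(struct platform_device *dev, pm_message_t state)
 *        {
 *                struct mmc_host *mmc = platform_get_drvdata(dev);
 *
 *                if (mmc)
 *                        return mmc_suspend_host(mmc, state);
 *                return 0;
 *        }
 *
 *        static int my_mmc_resume(struct platform_device *dev)
 *        {
 *                struct mmc_host *mmc = platform_get_drvdata(dev);
 *
 *                if (mmc)
 *                        return mmc_resume_host(mmc);
 *                return 0;
 *        }
 */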
#endif

static int __init mmc_init(void)
{
        int ret;

        workqueue = create_singlethread_workqueue("kmmcd");
        if (!workqueue)
                return -ENOMEM;

        ret = mmc_register_bus();
        if (ret)
                goto destroy_workqueue;

        ret = mmc_register_host_class();
        if (ret)
                goto unregister_bus;

        ret = sdio_register_bus();
        if (ret)
                goto unregister_host_class;

        return 0;

unregister_host_class:
        mmc_unregister_host_class();
unregister_bus:
        mmc_unregister_bus();
destroy_workqueue:
        destroy_workqueue(workqueue);

        return ret;
}

static void __exit mmc_exit(void)
{
        sdio_unregister_bus();
        mmc_unregister_host_class();
        mmc_unregister_bus();
        destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");