/*
 * linux/drivers/mmc/core/core.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <asm/scatterlist.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
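
/* Card attach entry points for the MMC, SD and SDIO bus handlers. */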
extern int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
extern int mmc_attach_sd(struct mmc_host *host, u32 ocr);
extern int mmc_attach_sdio(struct mmc_host *host, u32 ocr);

static struct workqueue_struct *workqueue;

/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
        unsigned long delay)
{
        return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
        flush_workqueue(workqueue);
}

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        int err = cmd->error;

        if (err && cmd->retries) {
                pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
                        mmc_hostname(host), cmd->opcode, err);

                cmd->retries--;
                cmd->error = 0;
                host->ops->request(host, mrq);
        } else {
                pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
                        mmc_hostname(host), cmd->opcode, err,
                        cmd->resp[0], cmd->resp[1],
                        cmd->resp[2], cmd->resp[3]);

                if (mrq->data) {
                        pr_debug("%s: %d bytes transferred: %d\n",
                                mmc_hostname(host),
                                mrq->data->bytes_xfered, mrq->data->error);
                }

                if (mrq->stop) {
                        pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
                                mmc_hostname(host), mrq->stop->opcode,
                                mrq->stop->error,
                                mrq->stop->resp[0], mrq->stop->resp[1],
                                mrq->stop->resp[2], mrq->stop->resp[3]);
                }

                if (mrq->done)
                        mrq->done(mrq);
        }
}
EXPORT_SYMBOL(mmc_request_done);
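
/*
 * Internal function. Start a request on a host: sanity-check the
 * request, clear the error fields and hand it to the host driver.
 */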
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned int i, sz;
#endif

        pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
                mmc_hostname(host), mrq->cmd->opcode,
                mrq->cmd->arg, mrq->cmd->flags);

        if (mrq->data) {
                pr_debug("%s: blksz %d blocks %d flags %08x "
                        "tsac %d ms nsac %d\n",
                        mmc_hostname(host), mrq->data->blksz,
                        mrq->data->blocks, mrq->data->flags,
                        mrq->data->timeout_ns / 1000000,
                        mrq->data->timeout_clks);
        }

        if (mrq->stop) {
                pr_debug("%s: CMD%u arg %08x flags %08x\n",
                        mmc_hostname(host), mrq->stop->opcode,
                        mrq->stop->arg, mrq->stop->flags);
        }

        WARN_ON(!host->claimed);

        mrq->cmd->error = 0;
        mrq->cmd->mrq = mrq;
        if (mrq->data) {
                BUG_ON(mrq->data->blksz > host->max_blk_size);
                BUG_ON(mrq->data->blocks > host->max_blk_count);
                BUG_ON(mrq->data->blocks * mrq->data->blksz >
                        host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
                sz = 0;
                for (i = 0; i < mrq->data->sg_len; i++)
                        sz += mrq->data->sg[i].length;
                BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

                mrq->cmd->data = mrq->data;
                mrq->data->error = 0;
                mrq->data->mrq = mrq;
                if (mrq->stop) {
                        mrq->data->stop = mrq->stop;
                        mrq->stop->error = 0;
                        mrq->stop->mrq = mrq;
                }
        }
        host->ops->request(host, mrq);
}
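
/*
 * Completion callback used by mmc_wait_for_req(): wakes the caller
 * waiting on the request's completion.
 */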
static void mmc_wait_done(struct mmc_request *mrq)
{
        complete(mrq->done_data);
}

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
        DECLARE_COMPLETION_ONSTACK(complete);

        mrq->done_data = &complete;
        mrq->done = mmc_wait_done;

        mmc_start_request(host, mrq);

        wait_for_completion(&complete);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
        struct mmc_request mrq;

        BUG_ON(!host->claimed);

        memset(&mrq, 0, sizeof(struct mmc_request));

        memset(cmd->resp, 0, sizeof(cmd->resp));
        cmd->retries = retries;

        mrq.cmd = cmd;
        cmd->data = NULL;

        mmc_wait_for_req(host, &mrq);

        return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
        unsigned int mult;

        /*
         * SD cards use a 100 multiplier rather than 10
         */
        mult = mmc_card_sd(card) ? 100 : 10;

        /*
         * Scale up the multiplier (and therefore the timeout) by
         * the r2w factor for writes.
         */
        if (data->flags & MMC_DATA_WRITE)
                mult <<= card->csd.r2w_factor;

        data->timeout_ns = card->csd.tacc_ns * mult;
        data->timeout_clks = card->csd.tacc_clks * mult;

        /*
         * SD cards also have an upper limit on the timeout.
         */
        if (mmc_card_sd(card)) {
                unsigned int timeout_us, limit_us;

                timeout_us = data->timeout_ns / 1000;
                timeout_us += data->timeout_clks * 1000 /
                        (card->host->ios.clock / 1000);

                if (data->flags & MMC_DATA_WRITE)
                        limit_us = 250000;
                else
                        limit_us = 100000;

                /*
                 * SDHC cards always use these fixed values.
                 */
                if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
                        data->timeout_ns = limit_us * 1000;
                        data->timeout_clks = 0;
                }
        }
}
EXPORT_SYMBOL(mmc_set_data_timeout);

/**
 * mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 *
 * Claim a host for a set of operations.
 */
void mmc_claim_host(struct mmc_host *host)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;

        might_sleep();

        add_wait_queue(&host->wq, &wait);
        spin_lock_irqsave(&host->lock, flags);
        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!host->claimed)
                        break;
                spin_unlock_irqrestore(&host->lock, flags);
                schedule();
                spin_lock_irqsave(&host->lock, flags);
        }
        set_current_state(TASK_RUNNING);
        host->claimed = 1;
        spin_unlock_irqrestore(&host->lock, flags);
        remove_wait_queue(&host->wq, &wait);
}
EXPORT_SYMBOL(mmc_claim_host);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release an MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
        unsigned long flags;

        BUG_ON(!host->claimed);

        spin_lock_irqsave(&host->lock, flags);
        host->claimed = 0;
        spin_unlock_irqrestore(&host->lock, flags);

        wake_up(&host->wq);
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
        struct mmc_ios *ios = &host->ios;

        pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
                "width %u timing %u\n",
                mmc_hostname(host), ios->clock, ios->bus_mode,
                ios->power_mode, ios->chip_select, ios->vdd,
                ios->bus_width, ios->timing);

        host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
        host->ios.chip_select = mode;
        mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
        WARN_ON(hz < host->f_min);

        if (hz > host->f_max)
                hz = host->f_max;

        host->ios.clock = hz;
        mmc_set_ios(host);
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
        host->ios.bus_mode = mode;
        mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
        host->ios.bus_width = width;
        mmc_set_ios(host);
}

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
        int bit;

        ocr &= host->ocr_avail;

        bit = ffs(ocr);
        if (bit) {
                bit -= 1;

                ocr &= 3 << bit;

                host->ios.vdd = bit;
                mmc_set_ios(host);
        } else {
                ocr = 0;
        }

        return ocr;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
        host->ios.timing = timing;
        mmc_set_ios(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
        int bit = fls(host->ocr_avail) - 1;

        host->ios.vdd = bit;
        host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
        host->ios.chip_select = MMC_CS_DONTCARE;
        host->ios.power_mode = MMC_POWER_UP;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);

        mmc_delay(1);

        host->ios.clock = host->f_min;
        host->ios.power_mode = MMC_POWER_ON;
        mmc_set_ios(host);

        mmc_delay(2);
}
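
/*
 * Turn the clock and power off and return the bus signalling (ios)
 * to its default, powered-down state.
 */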
static void mmc_power_off(struct mmc_host *host)
{
        host->ios.clock = 0;
        host->ios.vdd = 0;
        host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
        host->ios.chip_select = MMC_CS_DONTCARE;
        host->ios.power_mode = MMC_POWER_OFF;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
void __mmc_release_bus(struct mmc_host *host)
{
        BUG_ON(!host);
        BUG_ON(host->bus_refs);
        BUG_ON(!host->bus_dead);

        host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs++;
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs--;
        if ((host->bus_refs == 0) && host->bus_ops)
                __mmc_release_bus(host);
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
        unsigned long flags;

        BUG_ON(!host);
        BUG_ON(!ops);

        BUG_ON(!host->claimed);

        spin_lock_irqsave(&host->lock, flags);

        BUG_ON(host->bus_ops);
        BUG_ON(host->bus_refs);

        host->bus_ops = ops;
        host->bus_refs = 1;
        host->bus_dead = 0;

        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
        unsigned long flags;

        BUG_ON(!host);

        BUG_ON(!host->claimed);
        BUG_ON(!host->bus_ops);

        spin_lock_irqsave(&host->lock, flags);

        host->bus_dead = 1;

        spin_unlock_irqrestore(&host->lock, flags);

        mmc_power_off(host);

        mmc_bus_put(host);
}

/**
 * mmc_detect_change - process change of state on an MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        BUG_ON(host->removed);
        spin_unlock_irqrestore(&host->lock, flags);
#endif

        mmc_schedule_delayed_work(&host->detect, delay);
}
EXPORT_SYMBOL(mmc_detect_change);
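
/*
 * Work queue handler for card detection. If no bus handler is attached,
 * power up the host and probe for SDIO, SD and then MMC cards; otherwise
 * ask the current bus handler to re-detect its card.
 */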
void mmc_rescan(struct work_struct *work)
{
        struct mmc_host *host =
                container_of(work, struct mmc_host, detect.work);
        u32 ocr;
        int err;

        mmc_bus_get(host);

        if (host->bus_ops == NULL) {
                /*
                 * Only we can add a new handler, so it's safe to
                 * release the lock here.
                 */
                mmc_bus_put(host);

                mmc_claim_host(host);

                mmc_power_up(host);
                mmc_go_idle(host);

                mmc_send_if_cond(host, host->ocr_avail);

                /*
                 * First we search for SDIO...
                 */
                err = mmc_send_io_op_cond(host, 0, &ocr);
                if (!err) {
                        if (mmc_attach_sdio(host, ocr))
                                mmc_power_off(host);
                        return;
                }

                /*
                 * ...then normal SD...
                 */
                err = mmc_send_app_op_cond(host, 0, &ocr);
                if (!err) {
                        if (mmc_attach_sd(host, ocr))
                                mmc_power_off(host);
                        return;
                }

                /*
                 * ...and finally MMC.
                 */
                err = mmc_send_op_cond(host, 0, &ocr);
                if (!err) {
                        if (mmc_attach_mmc(host, ocr))
                                mmc_power_off(host);
                        return;
                }

                mmc_release_host(host);
                mmc_power_off(host);
        } else {
                if (host->bus_ops->detect && !host->bus_dead)
                        host->bus_ops->detect(host);

                mmc_bus_put(host);
        }
}
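
/*
 * Bring up a newly added host: make sure it is powered off and kick
 * off an initial card detection pass.
 */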
void mmc_start_host(struct mmc_host *host)
{
        mmc_power_off(host);
        mmc_detect_change(host, 0);
}
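
/*
 * Tear down a host that is going away: flush pending detection work,
 * remove any attached card and bus handler, and power the host off.
 */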
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->removed = 1;
        spin_unlock_irqrestore(&host->lock, flags);
#endif

        mmc_flush_scheduled_work();

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (host->bus_ops->remove)
                        host->bus_ops->remove(host);

                mmc_claim_host(host);
                mmc_detach_bus(host);
                mmc_release_host(host);
        }
        mmc_bus_put(host);

        BUG_ON(host->card);

        mmc_power_off(host);
}

#ifdef CONFIG_PM

/**
 * mmc_suspend_host - suspend a host
 * @host: mmc host
 * @state: suspend mode (PM_SUSPEND_xxx)
 */
int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
{
        mmc_flush_scheduled_work();

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                if (host->bus_ops->suspend)
                        host->bus_ops->suspend(host);
                if (!host->bus_ops->resume) {
                        if (host->bus_ops->remove)
                                host->bus_ops->remove(host);

                        mmc_claim_host(host);
                        mmc_detach_bus(host);
                        mmc_release_host(host);
                }
        }
        mmc_bus_put(host);

        mmc_power_off(host);

        return 0;
}
EXPORT_SYMBOL(mmc_suspend_host);

/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                mmc_power_up(host);
                BUG_ON(!host->bus_ops->resume);
                host->bus_ops->resume(host);
        }
        mmc_bus_put(host);

        /*
         * We add a slight delay here so that resume can progress
         * in parallel.
         */
        mmc_detect_change(host, 1);

        return 0;
}
EXPORT_SYMBOL(mmc_resume_host);

#endif
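
/*
 * Module initialisation: create the "kmmcd" work queue and register
 * the MMC bus type and host class with the driver core.
 */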
static int __init mmc_init(void)
{
        int ret;

        workqueue = create_singlethread_workqueue("kmmcd");
        if (!workqueue)
                return -ENOMEM;

        ret = mmc_register_bus();
        if (ret == 0) {
                ret = mmc_register_host_class();
                if (ret)
                        mmc_unregister_bus();
        }

        return ret;
}
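
/*
 * Module exit: unregister in reverse order and destroy the work queue.
 */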
static void __exit mmc_exit(void)
{
        mmc_unregister_host_class();
        mmc_unregister_bus();
        destroy_workqueue(workqueue);
}

module_init(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");