/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
  13. #include <linux/module.h>
  14. #include <linux/init.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/completion.h>
  17. #include <linux/device.h>
  18. #include <linux/delay.h>
  19. #include <linux/pagemap.h>
  20. #include <linux/err.h>
  21. #include <asm/scatterlist.h>
  22. #include <linux/scatterlist.h>
  23. #include <linux/mmc/card.h>
  24. #include <linux/mmc/host.h>
  25. #include <linux/mmc/mmc.h>
  26. #include <linux/mmc/sd.h>
  27. #include "core.h"
  28. #include "bus.h"
  29. #include "host.h"
  30. #include "mmc_ops.h"
  31. #include "sd_ops.h"
/* Bus attach entry points, implemented in the MMC and SD bus drivers. */
extern int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
extern int mmc_attach_sd(struct mmc_host *host, u32 ocr);

/* Single-threaded workqueue used for card detect/rescan work. */
static struct workqueue_struct *workqueue;
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 * Passes the return value of queue_delayed_work() straight through.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}
/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.  If the command failed and has
 *	retries left, the request is resubmitted to the host driver;
 *	otherwise the submitter's completion callback (if any) is run.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries) {
		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			mmc_hostname(host), cmd->opcode, err);

		/* Clear the error and hand the same request back to the
		 * host driver for another attempt. */
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	} else {
		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		/* Notify the submitter (e.g. wakes mmc_wait_for_req()). */
		if (mrq->done)
			mrq->done(mrq);
	}
}
EXPORT_SYMBOL(mmc_request_done);
  90. static void
  91. mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
  92. {
  93. #ifdef CONFIG_MMC_DEBUG
  94. unsigned int i, sz;
  95. #endif
  96. pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
  97. mmc_hostname(host), mrq->cmd->opcode,
  98. mrq->cmd->arg, mrq->cmd->flags);
  99. if (mrq->data) {
  100. pr_debug("%s: blksz %d blocks %d flags %08x "
  101. "tsac %d ms nsac %d\n",
  102. mmc_hostname(host), mrq->data->blksz,
  103. mrq->data->blocks, mrq->data->flags,
  104. mrq->data->timeout_ns / 10000000,
  105. mrq->data->timeout_clks);
  106. }
  107. if (mrq->stop) {
  108. pr_debug("%s: CMD%u arg %08x flags %08x\n",
  109. mmc_hostname(host), mrq->stop->opcode,
  110. mrq->stop->arg, mrq->stop->flags);
  111. }
  112. WARN_ON(!host->claimed);
  113. mrq->cmd->error = 0;
  114. mrq->cmd->mrq = mrq;
  115. if (mrq->data) {
  116. BUG_ON(mrq->data->blksz > host->max_blk_size);
  117. BUG_ON(mrq->data->blocks > host->max_blk_count);
  118. BUG_ON(mrq->data->blocks * mrq->data->blksz >
  119. host->max_req_size);
  120. #ifdef CONFIG_MMC_DEBUG
  121. sz = 0;
  122. for (i = 0;i < mrq->data->sg_len;i++)
  123. sz += mrq->data->sg[i].length;
  124. BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
  125. #endif
  126. mrq->cmd->data = mrq->data;
  127. mrq->data->error = 0;
  128. mrq->data->mrq = mrq;
  129. if (mrq->stop) {
  130. mrq->data->stop = mrq->stop;
  131. mrq->stop->error = 0;
  132. mrq->stop->mrq = mrq;
  133. }
  134. }
  135. host->ops->request(host, mrq);
  136. }
/* Completion callback for mmc_wait_for_req(): wake the waiting thread. */
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);
}
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	DECLARE_COMPLETION_ONSTACK(complete);

	/* mmc_wait_done() fires this completion when the request finishes. */
	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	mmc_start_request(host, mrq);

	wait_for_completion(&complete);
}
EXPORT_SYMBOL(mmc_wait_for_req);
  159. /**
  160. * mmc_wait_for_cmd - start a command and wait for completion
  161. * @host: MMC host to start command
  162. * @cmd: MMC command to start
  163. * @retries: maximum number of retries
  164. *
  165. * Start a new MMC command for a host, and wait for the command
  166. * to complete. Return any error that occurred while the command
  167. * was executing. Do not attempt to parse the response.
  168. */
  169. int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
  170. {
  171. struct mmc_request mrq;
  172. BUG_ON(!host->claimed);
  173. memset(&mrq, 0, sizeof(struct mmc_request));
  174. memset(cmd->resp, 0, sizeof(cmd->resp));
  175. cmd->retries = retries;
  176. mrq.cmd = cmd;
  177. cmd->data = NULL;
  178. mmc_wait_for_req(host, &mrq);
  179. return cmd->error;
  180. }
  181. EXPORT_SYMBOL(mmc_wait_for_cmd);
  182. /**
  183. * mmc_set_data_timeout - set the timeout for a data command
  184. * @data: data phase for command
  185. * @card: the MMC card associated with the data transfer
  186. * @write: flag to differentiate reads from writes
  187. *
  188. * Computes the data timeout parameters according to the
  189. * correct algorithm given the card type.
  190. */
  191. void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
  192. int write)
  193. {
  194. unsigned int mult;
  195. /*
  196. * SD cards use a 100 multiplier rather than 10
  197. */
  198. mult = mmc_card_sd(card) ? 100 : 10;
  199. /*
  200. * Scale up the multiplier (and therefore the timeout) by
  201. * the r2w factor for writes.
  202. */
  203. if (write)
  204. mult <<= card->csd.r2w_factor;
  205. data->timeout_ns = card->csd.tacc_ns * mult;
  206. data->timeout_clks = card->csd.tacc_clks * mult;
  207. /*
  208. * SD cards also have an upper limit on the timeout.
  209. */
  210. if (mmc_card_sd(card)) {
  211. unsigned int timeout_us, limit_us;
  212. timeout_us = data->timeout_ns / 1000;
  213. timeout_us += data->timeout_clks * 1000 /
  214. (card->host->ios.clock / 1000);
  215. if (write)
  216. limit_us = 250000;
  217. else
  218. limit_us = 100000;
  219. /*
  220. * SDHC cards always use these fixed values.
  221. */
  222. if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
  223. data->timeout_ns = limit_us * 1000;
  224. data->timeout_clks = 0;
  225. }
  226. }
  227. }
  228. EXPORT_SYMBOL(mmc_set_data_timeout);
/**
 *	mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *
 *	Claim a host for a set of operations.  Sleeps uninterruptibly
 *	until any previous claimer has released the host.
 */
void mmc_claim_host(struct mmc_host *host)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	might_sleep();

	/*
	 * Classic sleep/retry loop: register on the wait queue, then
	 * re-check "claimed" under host->lock until the host is free.
	 * mmc_release_host() wakes us via host->wq.
	 */
	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!host->claimed)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	host->claimed = 1;	/* take ownership while still holding the lock */
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
}
EXPORT_SYMBOL(mmc_claim_host);
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	host->claimed = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	/* Wake any thread sleeping in mmc_claim_host(). */
	wake_up(&host->wq);
}
EXPORT_SYMBOL(mmc_release_host);
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}
  295. /*
  296. * Sets the host clock to the highest possible frequency that
  297. * is below "hz".
  298. */
  299. void mmc_set_clock(struct mmc_host *host, unsigned int hz)
  300. {
  301. WARN_ON(hz < host->f_min);
  302. if (hz > host->f_max)
  303. hz = host->f_max;
  304. host->ios.clock = hz;
  305. mmc_set_ios(host);
  306. }
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}
/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}
  323. /*
  324. * Mask off any voltages we don't support and select
  325. * the lowest voltage
  326. */
  327. u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
  328. {
  329. int bit;
  330. ocr &= host->ocr_avail;
  331. bit = ffs(ocr);
  332. if (bit) {
  333. bit -= 1;
  334. ocr &= 3 << bit;
  335. host->ios.vdd = bit;
  336. mmc_set_ios(host);
  337. } else {
  338. ocr = 0;
  339. }
  340. return ocr;
  341. }
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	/* Select the highest voltage the host supports. */
	int bit = fls(host->ocr_avail) - 1;

	/* Stage 1: power on, no clock, conservative bus settings. */
	host->ios.vdd = bit;
	host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_delay(1);	/* let the supply settle before starting the clock */

	/* Stage 2: start the clock at the slowest supported rate. */
	host->ios.clock = host->f_min;
	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	mmc_delay(2);	/* brief settling delay after enabling the clock */
}
/*
 * Remove power from the card: stop the clock, drop Vdd and reset the
 * bus settings back to their power-on defaults in a single ios call.
 */
static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;
	host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);
}
/*
 * Cleanup when the last reference to the bus operator is dropped.
 * Called from mmc_bus_put() with host->lock held.
 */
void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}
/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.  The host must be claimed and must not already
 * have a handler attached.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	BUG_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;	/* initial reference, dropped by mmc_detach_bus() */
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Remove the current bus handler from a host. Assumes that there are
 * no interesting cards left, so the bus is powered down.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	BUG_ON(!host->claimed);
	BUG_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;	/* stops further detect callbacks in mmc_rescan() */

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_power_off(host);

	/* Drops the reference taken in mmc_attach_bus(). */
	mmc_bus_put(host);
}
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	/* Must never be called after mmc_stop_host() has marked removal. */
	BUG_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	mmc_schedule_delayed_work(&host->detect, delay);
}
EXPORT_SYMBOL(mmc_detect_change);
/*
 * mmc_rescan - card detect work function, run from the MMC workqueue.
 *
 * If no bus handler is attached yet, powers the host up and probes for
 * an SD card first, then for an MMC card.  If a handler is already
 * attached, just asks it to re-check card presence.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;

	mmc_bus_get(host);

	if (host->bus_ops == NULL) {
		/*
		 * Only we can add a new handler, so it's safe to
		 * release the lock here.
		 */
		mmc_bus_put(host);

		mmc_claim_host(host);

		mmc_power_up(host);
		mmc_go_idle(host);

		/* SD 2.0 probe; ignored by older cards. */
		mmc_send_if_cond(host, host->ocr_avail);

		err = mmc_send_app_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
			/*
			 * NOTE(review): the host is not released on the
			 * attach paths here; presumably mmc_attach_sd()/
			 * mmc_attach_mmc() release it internally — confirm
			 * against sd.c/mmc.c.
			 */
		} else {
			/*
			 * If we fail to detect any SD cards then try
			 * searching for MMC cards.
			 */
			err = mmc_send_op_cond(host, 0, &ocr);
			if (!err) {
				if (mmc_attach_mmc(host, ocr))
					mmc_power_off(host);
			} else {
				/* Nothing in the slot: power down and let go. */
				mmc_power_off(host);
				mmc_release_host(host);
			}
		}
	} else {
		/* A handler is attached: let it poll for card changes. */
		if (host->bus_ops->detect && !host->bus_dead)
			host->bus_ops->detect(host);

		mmc_bus_put(host);
	}
}
/*
 * Bring a newly added host into service: start from a known powered-off
 * state, then schedule an immediate card detect pass.
 */
void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}
/*
 * Take a host out of service: flush pending detect work, remove any
 * attached card/bus handler, and power the host off.
 */
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	/* From here on mmc_detect_change() must not be called again. */
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	/* Make sure no rescan is running before tearing anything down. */
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_release_host(host);
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}
#ifdef CONFIG_PM

/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 *	@state: suspend mode (PM_SUSPEND_xxx)
 *
 *	Flushes pending detect work, suspends the attached bus handler
 *	(or removes it entirely if it cannot resume), then powers the
 *	host off.  Always returns 0.
 */
int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
{
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend)
			host->bus_ops->suspend(host);
		if (!host->bus_ops->resume) {
			/*
			 * No resume support: tear the card down now; it
			 * will be re-probed from scratch by the detect
			 * pass scheduled in mmc_resume_host().
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);

			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
		}
	}
	mmc_bus_put(host);

	mmc_power_off(host);

	return 0;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 *
 *	Powers the host back up, lets the bus handler resume the card,
 *	and schedules a detect pass.  Always returns 0.
 */
int mmc_resume_host(struct mmc_host *host)
{
	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		mmc_power_up(host);
		/* A handler that survived suspend must support resume. */
		BUG_ON(!host->bus_ops->resume);
		host->bus_ops->resume(host);
	}
	mmc_bus_put(host);

	/*
	 * We add a slight delay here so that resume can progress
	 * in parallel.
	 */
	mmc_detect_change(host, 1);

	return 0;
}

EXPORT_SYMBOL(mmc_resume_host);

#endif
  591. static int __init mmc_init(void)
  592. {
  593. int ret;
  594. workqueue = create_singlethread_workqueue("kmmcd");
  595. if (!workqueue)
  596. return -ENOMEM;
  597. ret = mmc_register_bus();
  598. if (ret == 0) {
  599. ret = mmc_register_host_class();
  600. if (ret)
  601. mmc_unregister_bus();
  602. }
  603. return ret;
  604. }
/* Module exit: tear down in reverse order of mmc_init(). */
static void __exit mmc_exit(void)
{
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

module_init(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");