  1. /*
  2. * linux/drivers/mmc/core/core.c
  3. *
  4. * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
  5. * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
  6. * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
  7. * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #include <linux/module.h>
  14. #include <linux/init.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/completion.h>
  17. #include <linux/device.h>
  18. #include <linux/delay.h>
  19. #include <linux/pagemap.h>
  20. #include <linux/err.h>
  21. #include <linux/leds.h>
  22. #include <linux/scatterlist.h>
  23. #include <linux/log2.h>
  24. #include <linux/regulator/consumer.h>
  25. #include <linux/mmc/card.h>
  26. #include <linux/mmc/host.h>
  27. #include <linux/mmc/mmc.h>
  28. #include <linux/mmc/sd.h>
  29. #include "core.h"
  30. #include "bus.h"
  31. #include "host.h"
  32. #include "sdio_bus.h"
  33. #include "mmc_ops.h"
  34. #include "sd_ops.h"
  35. #include "sdio_ops.h"
  36. static struct workqueue_struct *workqueue;
  37. /*
  38. * Enabling software CRCs on the data blocks can be a significant (30%)
  39. * performance cost, and for other reasons may not always be desired.
  40. * So we allow it to be disabled.
  41. */
  42. int use_spi_crc = 1;
  43. module_param(use_spi_crc, bool, 0);
  44. /*
  45. * Internal function. Schedule delayed work in the MMC work queue.
  46. */
  47. static int mmc_schedule_delayed_work(struct delayed_work *work,
  48. unsigned long delay)
  49. {
  50. return queue_delayed_work(workqueue, work, delay);
  51. }
  52. /*
  53. * Internal function. Flush all scheduled work from the MMC work queue.
  54. */
  55. static void mmc_flush_scheduled_work(void)
  56. {
  57. flush_workqueue(workqueue);
  58. }
  59. /**
  60. * mmc_request_done - finish processing an MMC request
  61. * @host: MMC host which completed request
  62. * @mrq: MMC request which completed
  63. *
  64. * MMC drivers should call this function when they have completed
  65. * their processing of a request.
  66. */
  67. void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
  68. {
  69. struct mmc_command *cmd = mrq->cmd;
  70. int err = cmd->error;
  71. if (err && cmd->retries && mmc_host_is_spi(host)) {
  72. if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
  73. cmd->retries = 0;
  74. }
  75. if (err && cmd->retries) {
  76. pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
  77. mmc_hostname(host), cmd->opcode, err);
  78. cmd->retries--;
  79. cmd->error = 0;
  80. host->ops->request(host, mrq);
  81. } else {
  82. led_trigger_event(host->led, LED_OFF);
  83. pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
  84. mmc_hostname(host), cmd->opcode, err,
  85. cmd->resp[0], cmd->resp[1],
  86. cmd->resp[2], cmd->resp[3]);
  87. if (mrq->data) {
  88. pr_debug("%s: %d bytes transferred: %d\n",
  89. mmc_hostname(host),
  90. mrq->data->bytes_xfered, mrq->data->error);
  91. }
  92. if (mrq->stop) {
  93. pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
  94. mmc_hostname(host), mrq->stop->opcode,
  95. mrq->stop->error,
  96. mrq->stop->resp[0], mrq->stop->resp[1],
  97. mrq->stop->resp[2], mrq->stop->resp[3]);
  98. }
  99. if (mrq->done)
  100. mrq->done(mrq);
  101. }
  102. }
  103. EXPORT_SYMBOL(mmc_request_done);
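/*
 * Usage sketch: a host driver would normally call mmc_request_done() from
 * its transfer-complete interrupt path, roughly as below.  The "my_host"
 * structure and my_host_irq_done() are hypothetical; only the mmc_request
 * handling reflects the API above.
 *
 *	static void my_host_irq_done(struct my_host *mhost, int error)
 *	{
 *		struct mmc_request *mrq = mhost->mrq;
 *
 *		mrq->cmd->error = error;
 *		if (mrq->data && !error)
 *			mrq->data->bytes_xfered =
 *				mrq->data->blocks * mrq->data->blksz;
 *
 *		mhost->mrq = NULL;
 *		mmc_request_done(mhost->mmc, mrq);
 *	}
 */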
  104. static void
  105. mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
  106. {
  107. #ifdef CONFIG_MMC_DEBUG
  108. unsigned int i, sz;
  109. struct scatterlist *sg;
  110. #endif
  111. pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
  112. mmc_hostname(host), mrq->cmd->opcode,
  113. mrq->cmd->arg, mrq->cmd->flags);
  114. if (mrq->data) {
  115. pr_debug("%s: blksz %d blocks %d flags %08x "
  116. "tsac %d ms nsac %d\n",
  117. mmc_hostname(host), mrq->data->blksz,
  118. mrq->data->blocks, mrq->data->flags,
  119. mrq->data->timeout_ns / 1000000,
  120. mrq->data->timeout_clks);
  121. }
  122. if (mrq->stop) {
  123. pr_debug("%s: CMD%u arg %08x flags %08x\n",
  124. mmc_hostname(host), mrq->stop->opcode,
  125. mrq->stop->arg, mrq->stop->flags);
  126. }
  127. WARN_ON(!host->claimed);
  128. led_trigger_event(host->led, LED_FULL);
  129. mrq->cmd->error = 0;
  130. mrq->cmd->mrq = mrq;
  131. if (mrq->data) {
  132. BUG_ON(mrq->data->blksz > host->max_blk_size);
  133. BUG_ON(mrq->data->blocks > host->max_blk_count);
  134. BUG_ON(mrq->data->blocks * mrq->data->blksz >
  135. host->max_req_size);
  136. #ifdef CONFIG_MMC_DEBUG
  137. sz = 0;
  138. for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
  139. sz += sg->length;
  140. BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
  141. #endif
  142. mrq->cmd->data = mrq->data;
  143. mrq->data->error = 0;
  144. mrq->data->mrq = mrq;
  145. if (mrq->stop) {
  146. mrq->data->stop = mrq->stop;
  147. mrq->stop->error = 0;
  148. mrq->stop->mrq = mrq;
  149. }
  150. }
  151. host->ops->request(host, mrq);
  152. }
  153. static void mmc_wait_done(struct mmc_request *mrq)
  154. {
  155. complete(mrq->done_data);
  156. }
  157. /**
  158. * mmc_wait_for_req - start a request and wait for completion
  159. * @host: MMC host to start command
  160. * @mrq: MMC request to start
  161. *
  162. * Start a new MMC custom command request for a host, and wait
  163. * for the command to complete. Does not attempt to parse the
  164. * response.
  165. */
  166. void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
  167. {
  168. DECLARE_COMPLETION_ONSTACK(complete);
  169. mrq->done_data = &complete;
  170. mrq->done = mmc_wait_done;
  171. mmc_start_request(host, mrq);
  172. wait_for_completion(&complete);
  173. }
  174. EXPORT_SYMBOL(mmc_wait_for_req);
  175. /**
  176. * mmc_wait_for_cmd - start a command and wait for completion
  177. * @host: MMC host to start command
  178. * @cmd: MMC command to start
  179. * @retries: maximum number of retries
  180. *
  181. * Start a new MMC command for a host, and wait for the command
  182. * to complete. Return any error that occurred while the command
  183. * was executing. Do not attempt to parse the response.
  184. */
  185. int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
  186. {
  187. struct mmc_request mrq;
  188. WARN_ON(!host->claimed);
  189. memset(&mrq, 0, sizeof(struct mmc_request));
  190. memset(cmd->resp, 0, sizeof(cmd->resp));
  191. cmd->retries = retries;
  192. mrq.cmd = cmd;
  193. cmd->data = NULL;
  194. mmc_wait_for_req(host, &mrq);
  195. return cmd->error;
  196. }
  197. EXPORT_SYMBOL(mmc_wait_for_cmd);
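/*
 * Usage sketch: issuing a simple command with mmc_wait_for_cmd(), assuming
 * a valid struct mmc_card *card, a local u32 "status", and a caller that
 * already holds the host claim.  CMD13 (MMC_SEND_STATUS) is only an example
 * opcode.
 *
 *	struct mmc_command cmd;
 *	int err;
 *
 *	memset(&cmd, 0, sizeof(struct mmc_command));
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	if (!err)
 *		status = cmd.resp[0];
 */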
  198. /**
  199. * mmc_set_data_timeout - set the timeout for a data command
  200. * @data: data phase for command
  201. * @card: the MMC card associated with the data transfer
  202. *
  203. * Computes the data timeout parameters according to the
  204. * correct algorithm given the card type.
  205. */
  206. void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
  207. {
  208. unsigned int mult;
  209. /*
  210. * SDIO cards only define an upper 1 s limit on access.
  211. */
  212. if (mmc_card_sdio(card)) {
  213. data->timeout_ns = 1000000000;
  214. data->timeout_clks = 0;
  215. return;
  216. }
  217. /*
  218. * SD cards use a 100 multiplier rather than 10
  219. */
  220. mult = mmc_card_sd(card) ? 100 : 10;
  221. /*
  222. * Scale up the multiplier (and therefore the timeout) by
  223. * the r2w factor for writes.
  224. */
  225. if (data->flags & MMC_DATA_WRITE)
  226. mult <<= card->csd.r2w_factor;
  227. data->timeout_ns = card->csd.tacc_ns * mult;
  228. data->timeout_clks = card->csd.tacc_clks * mult;
  229. /*
  230. * SD cards also have an upper limit on the timeout.
  231. */
  232. if (mmc_card_sd(card)) {
  233. unsigned int timeout_us, limit_us;
  234. timeout_us = data->timeout_ns / 1000;
  235. timeout_us += data->timeout_clks * 1000 /
  236. (card->host->ios.clock / 1000);
  237. if (data->flags & MMC_DATA_WRITE)
  238. /*
  239. * The limit is really 250 ms, but that is
  240. * insufficient for some crappy cards.
  241. */
  242. limit_us = 300000;
  243. else
  244. limit_us = 100000;
  245. /*
  246. * SDHC cards always use these fixed values.
  247. */
  248. if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
  249. data->timeout_ns = limit_us * 1000;
  250. data->timeout_clks = 0;
  251. }
  252. }
  253. /*
  254. * Some cards need very high timeouts if driven in SPI mode.
  255. * The worst observed timeout was 900ms after writing a
  256. * continuous stream of data until the internal logic
  257. * overflowed.
  258. */
  259. if (mmc_host_is_spi(card->host)) {
  260. if (data->flags & MMC_DATA_WRITE) {
  261. if (data->timeout_ns < 1000000000)
  262. data->timeout_ns = 1000000000; /* 1s */
  263. } else {
  264. if (data->timeout_ns < 100000000)
  265. data->timeout_ns = 100000000; /* 100ms */
  266. }
  267. }
  268. }
  269. EXPORT_SYMBOL(mmc_set_data_timeout);
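/*
 * Usage sketch: a single-block read built on mmc_wait_for_req() and
 * mmc_set_data_timeout().  "buf", "blk_addr" and "card" are assumed to be
 * provided by the caller, and the host must already be claimed.
 *
 *	struct mmc_request mrq;
 *	struct mmc_command cmd;
 *	struct mmc_data data;
 *	struct scatterlist sg;
 *
 *	memset(&mrq, 0, sizeof(mrq));
 *	memset(&cmd, 0, sizeof(cmd));
 *	memset(&data, 0, sizeof(data));
 *
 *	sg_init_one(&sg, buf, 512);
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *
 *	mmc_set_data_timeout(&data, card);
 *	mmc_wait_for_req(card->host, &mrq);
 */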
  270. /**
  271. * mmc_align_data_size - pads a transfer size to a more optimal value
  272. * @card: the MMC card associated with the data transfer
  273. * @sz: original transfer size
  274. *
  275. * Pads the original data size with a number of extra bytes in
  276. * order to avoid controller bugs and/or performance hits
  277. * (e.g. some controllers revert to PIO for certain sizes).
  278. *
  279. * Returns the improved size, which might be unmodified.
  280. *
  281. * Note that this function is only relevant when issuing a
  282. * single scatter gather entry.
  283. */
  284. unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
  285. {
  286. /*
  287. * FIXME: We don't have a system for the controller to tell
  288. * the core about its problems yet, so for now we just 32-bit
  289. * align the size.
  290. */
  291. sz = ((sz + 3) / 4) * 4;
  292. return sz;
  293. }
  294. EXPORT_SYMBOL(mmc_align_data_size);
  295. /**
  296. * mmc_host_enable - enable a host.
  297. * @host: mmc host to enable
  298. *
  299. * Hosts that support power saving can use the 'enable' and 'disable'
  300. * methods to exit and enter power saving states. For more information
  301. * see comments for struct mmc_host_ops.
  302. */
  303. int mmc_host_enable(struct mmc_host *host)
  304. {
  305. if (!(host->caps & MMC_CAP_DISABLE))
  306. return 0;
  307. if (host->en_dis_recurs)
  308. return 0;
  309. if (host->nesting_cnt++)
  310. return 0;
  311. cancel_delayed_work_sync(&host->disable);
  312. if (host->enabled)
  313. return 0;
  314. if (host->ops->enable) {
  315. int err;
  316. host->en_dis_recurs = 1;
  317. err = host->ops->enable(host);
  318. host->en_dis_recurs = 0;
  319. if (err) {
  320. pr_debug("%s: enable error %d\n",
  321. mmc_hostname(host), err);
  322. return err;
  323. }
  324. }
  325. host->enabled = 1;
  326. return 0;
  327. }
  328. EXPORT_SYMBOL(mmc_host_enable);
  329. static int mmc_host_do_disable(struct mmc_host *host, int lazy)
  330. {
  331. if (host->ops->disable) {
  332. int err;
  333. host->en_dis_recurs = 1;
  334. err = host->ops->disable(host, lazy);
  335. host->en_dis_recurs = 0;
  336. if (err < 0) {
  337. pr_debug("%s: disable error %d\n",
  338. mmc_hostname(host), err);
  339. return err;
  340. }
  341. if (err > 0) {
  342. unsigned long delay = msecs_to_jiffies(err);
  343. mmc_schedule_delayed_work(&host->disable, delay);
  344. }
  345. }
  346. host->enabled = 0;
  347. return 0;
  348. }
  349. /**
  350. * mmc_host_disable - disable a host.
  351. * @host: mmc host to disable
  352. *
  353. * Hosts that support power saving can use the 'enable' and 'disable'
  354. * methods to exit and enter power saving states. For more information
  355. * see comments for struct mmc_host_ops.
  356. */
  357. int mmc_host_disable(struct mmc_host *host)
  358. {
  359. int err;
  360. if (!(host->caps & MMC_CAP_DISABLE))
  361. return 0;
  362. if (host->en_dis_recurs)
  363. return 0;
  364. if (--host->nesting_cnt)
  365. return 0;
  366. if (!host->enabled)
  367. return 0;
  368. err = mmc_host_do_disable(host, 0);
  369. return err;
  370. }
  371. EXPORT_SYMBOL(mmc_host_disable);
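/*
 * Usage sketch: a host driver opting into this power-saving scheme sets
 * MMC_CAP_DISABLE and fills in 'enable'/'disable' callbacks in its
 * mmc_host_ops.  A positive return from 'disable' asks the core to call it
 * again (lazily) after that many milliseconds.  my_enable(), my_disable(),
 * my_leave_low_power_state(), my_enter_low_power_state() and the delays are
 * hypothetical.
 *
 *	static int my_enable(struct mmc_host *mmc)
 *	{
 *		my_leave_low_power_state(mmc);
 *		return 0;
 *	}
 *
 *	static int my_disable(struct mmc_host *mmc, int lazy)
 *	{
 *		if (!lazy)
 *			return 100;
 *
 *		my_enter_low_power_state(mmc);
 *		return 0;
 *	}
 *
 * and, at probe time:
 *
 *	mmc->caps |= MMC_CAP_DISABLE;
 *	mmc->disable_delay = 50;
 */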
  372. /**
  373. * __mmc_claim_host - exclusively claim a host
  374. * @host: mmc host to claim
  375. * @abort: whether or not the operation should be aborted
  376. *
  377. * Claim a host for a set of operations. If @abort is non-NULL and
  378. * points to a non-zero value, then this will return prematurely with
  379. * that non-zero value without acquiring the lock. Returns zero
  380. * with the lock held otherwise.
  381. */
  382. int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
  383. {
  384. DECLARE_WAITQUEUE(wait, current);
  385. unsigned long flags;
  386. int stop;
  387. might_sleep();
  388. add_wait_queue(&host->wq, &wait);
  389. spin_lock_irqsave(&host->lock, flags);
  390. while (1) {
  391. set_current_state(TASK_UNINTERRUPTIBLE);
  392. stop = abort ? atomic_read(abort) : 0;
  393. if (stop || !host->claimed || host->claimer == current)
  394. break;
  395. spin_unlock_irqrestore(&host->lock, flags);
  396. schedule();
  397. spin_lock_irqsave(&host->lock, flags);
  398. }
  399. set_current_state(TASK_RUNNING);
  400. if (!stop) {
  401. host->claimed = 1;
  402. host->claimer = current;
  403. host->claim_cnt += 1;
  404. } else
  405. wake_up(&host->wq);
  406. spin_unlock_irqrestore(&host->lock, flags);
  407. remove_wait_queue(&host->wq, &wait);
  408. if (!stop)
  409. mmc_host_enable(host);
  410. return stop;
  411. }
  412. EXPORT_SYMBOL(__mmc_claim_host);
  413. /**
  414. * mmc_try_claim_host - try exclusively to claim a host
  415. * @host: mmc host to claim
  416. *
  417. * Returns %1 if the host is claimed, %0 otherwise.
  418. */
  419. int mmc_try_claim_host(struct mmc_host *host)
  420. {
  421. int claimed_host = 0;
  422. unsigned long flags;
  423. spin_lock_irqsave(&host->lock, flags);
  424. if (!host->claimed || host->claimer == current) {
  425. host->claimed = 1;
  426. host->claimer = current;
  427. host->claim_cnt += 1;
  428. claimed_host = 1;
  429. }
  430. spin_unlock_irqrestore(&host->lock, flags);
  431. return claimed_host;
  432. }
  433. EXPORT_SYMBOL(mmc_try_claim_host);
  434. static void mmc_do_release_host(struct mmc_host *host)
  435. {
  436. unsigned long flags;
  437. spin_lock_irqsave(&host->lock, flags);
  438. if (--host->claim_cnt) {
  439. /* Release for nested claim */
  440. spin_unlock_irqrestore(&host->lock, flags);
  441. } else {
  442. host->claimed = 0;
  443. host->claimer = NULL;
  444. spin_unlock_irqrestore(&host->lock, flags);
  445. wake_up(&host->wq);
  446. }
  447. }
  448. void mmc_host_deeper_disable(struct work_struct *work)
  449. {
  450. struct mmc_host *host =
  451. container_of(work, struct mmc_host, disable.work);
  452. /* If the host is claimed then we do not want to disable it anymore */
  453. if (!mmc_try_claim_host(host))
  454. return;
  455. mmc_host_do_disable(host, 1);
  456. mmc_do_release_host(host);
  457. }
  458. /**
  459. * mmc_host_lazy_disable - lazily disable a host.
  460. * @host: mmc host to disable
  461. *
  462. * Hosts that support power saving can use the 'enable' and 'disable'
  463. * methods to exit and enter power saving states. For more information
  464. * see comments for struct mmc_host_ops.
  465. */
  466. int mmc_host_lazy_disable(struct mmc_host *host)
  467. {
  468. if (!(host->caps & MMC_CAP_DISABLE))
  469. return 0;
  470. if (host->en_dis_recurs)
  471. return 0;
  472. if (--host->nesting_cnt)
  473. return 0;
  474. if (!host->enabled)
  475. return 0;
  476. if (host->disable_delay) {
  477. mmc_schedule_delayed_work(&host->disable,
  478. msecs_to_jiffies(host->disable_delay));
  479. return 0;
  480. } else
  481. return mmc_host_do_disable(host, 1);
  482. }
  483. EXPORT_SYMBOL(mmc_host_lazy_disable);
  484. /**
  485. * mmc_release_host - release a host
  486. * @host: mmc host to release
  487. *
  488. * Release an MMC host, allowing others to claim the host
  489. * for their operations.
  490. */
  491. void mmc_release_host(struct mmc_host *host)
  492. {
  493. WARN_ON(!host->claimed);
  494. mmc_host_lazy_disable(host);
  495. mmc_do_release_host(host);
  496. }
  497. EXPORT_SYMBOL(mmc_release_host);
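/*
 * Usage sketch: card drivers bracket command sequences with a claim/release
 * pair so that only one thread talks to the host at a time, e.g.:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_release_host(card->host);
 *
 * mmc_claim_host() is the convenience wrapper around __mmc_claim_host()
 * from the public headers; &cmd is assumed to have been set up as in the
 * earlier examples.
 */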
  498. /*
  499. * Internal function that does the actual ios call to the host driver,
  500. * optionally printing some debug output.
  501. */
  502. static inline void mmc_set_ios(struct mmc_host *host)
  503. {
  504. struct mmc_ios *ios = &host->ios;
  505. pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
  506. "width %u timing %u\n",
  507. mmc_hostname(host), ios->clock, ios->bus_mode,
  508. ios->power_mode, ios->chip_select, ios->vdd,
  509. ios->bus_width, ios->timing);
  510. host->ops->set_ios(host, ios);
  511. }
  512. /*
  513. * Control chip select pin on a host.
  514. */
  515. void mmc_set_chip_select(struct mmc_host *host, int mode)
  516. {
  517. host->ios.chip_select = mode;
  518. mmc_set_ios(host);
  519. }
  520. /*
  521. * Sets the host clock to the highest possible frequency that
  522. * is below "hz".
  523. */
  524. void mmc_set_clock(struct mmc_host *host, unsigned int hz)
  525. {
  526. WARN_ON(hz < host->f_min);
  527. if (hz > host->f_max)
  528. hz = host->f_max;
  529. host->ios.clock = hz;
  530. mmc_set_ios(host);
  531. }
  532. /*
  533. * Change the bus mode (open drain/push-pull) of a host.
  534. */
  535. void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
  536. {
  537. host->ios.bus_mode = mode;
  538. mmc_set_ios(host);
  539. }
  540. /*
  541. * Change data bus width of a host.
  542. */
  543. void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
  544. {
  545. host->ios.bus_width = width;
  546. mmc_set_ios(host);
  547. }
  548. /**
  549. * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
  550. * @vdd: voltage (mV)
  551. * @low_bits: prefer low bits in boundary cases
  552. *
  553. * This function returns the OCR bit number according to the provided @vdd
  554. * value. If conversion is not possible, a negative errno value is returned.
  555. *
  556. * Depending on the @low_bits flag the function prefers low or high OCR bits
  557. * on boundary voltages. For example,
  558. * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
  559. * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
  560. *
  561. * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
  562. */
  563. static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
  564. {
  565. const int max_bit = ilog2(MMC_VDD_35_36);
  566. int bit;
  567. if (vdd < 1650 || vdd > 3600)
  568. return -EINVAL;
  569. if (vdd >= 1650 && vdd <= 1950)
  570. return ilog2(MMC_VDD_165_195);
  571. if (low_bits)
  572. vdd -= 1;
  573. /* Base 2000 mV, step 100 mV, bit's base 8. */
  574. bit = (vdd - 2000) / 100 + 8;
  575. if (bit > max_bit)
  576. return max_bit;
  577. return bit;
  578. }
  579. /**
  580. * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
  581. * @vdd_min: minimum voltage value (mV)
  582. * @vdd_max: maximum voltage value (mV)
  583. *
  584. * This function returns the OCR mask bits according to the provided @vdd_min
  585. * and @vdd_max values. If conversion is not possible the function returns 0.
  586. *
  587. * Notes wrt boundary cases:
  588. * This function sets the OCR bits for all boundary voltages, for example
  589. * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
  590. * MMC_VDD_34_35 mask.
  591. */
  592. u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
  593. {
  594. u32 mask = 0;
  595. if (vdd_max < vdd_min)
  596. return 0;
  597. /* Prefer high bits for the boundary vdd_max values. */
  598. vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
  599. if (vdd_max < 0)
  600. return 0;
  601. /* Prefer low bits for the boundary vdd_min values. */
  602. vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
  603. if (vdd_min < 0)
  604. return 0;
  605. /* Fill the mask, from max bit to min bit. */
  606. while (vdd_max >= vdd_min)
  607. mask |= 1 << vdd_max--;
  608. return mask;
  609. }
  610. EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
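/*
 * Usage sketch: a host driver that knows its slot is fed from a fixed
 * 3.2V-3.4V rail could derive ocr_avail from that range instead of
 * hard-coding OCR bits.  The voltages are illustrative.
 *
 *	mmc->ocr_avail = mmc_vddrange_to_ocrmask(3200, 3400);
 *	if (!mmc->ocr_avail)
 *		return -EINVAL;
 */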
  611. #ifdef CONFIG_REGULATOR
  612. /**
  613. * mmc_regulator_get_ocrmask - return mask of supported voltages
  614. * @supply: regulator to use
  615. *
  616. * This returns either a negative errno, or a mask of voltages that
  617. * can be provided to MMC/SD/SDIO devices using the specified voltage
  618. * regulator. This would normally be called before registering the
  619. * MMC host adapter.
  620. */
  621. int mmc_regulator_get_ocrmask(struct regulator *supply)
  622. {
  623. int result = 0;
  624. int count;
  625. int i;
  626. count = regulator_count_voltages(supply);
  627. if (count < 0)
  628. return count;
  629. for (i = 0; i < count; i++) {
  630. int vdd_uV;
  631. int vdd_mV;
  632. vdd_uV = regulator_list_voltage(supply, i);
  633. if (vdd_uV <= 0)
  634. continue;
  635. vdd_mV = vdd_uV / 1000;
  636. result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
  637. }
  638. return result;
  639. }
  640. EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
  641. /**
  642. * mmc_regulator_set_ocr - set regulator to match host->ios voltage
  643. * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
  644. * @supply: regulator to use
  645. *
  646. * Returns zero on success, else negative errno.
  647. *
  648. * MMC host drivers may use this to enable or disable a regulator using
  649. * a particular supply voltage. This would normally be called from the
  650. * set_ios() method.
  651. */
  652. int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit)
  653. {
  654. int result = 0;
  655. int min_uV, max_uV;
  656. int enabled;
  657. enabled = regulator_is_enabled(supply);
  658. if (enabled < 0)
  659. return enabled;
  660. if (vdd_bit) {
  661. int tmp;
  662. int voltage;
  663. /* REVISIT mmc_vddrange_to_ocrmask() may have set some
  664. * bits this regulator doesn't quite support ... don't
  665. * be too picky, most cards and regulators are OK with
  666. * a 0.1V range goof (it's a small error percentage).
  667. */
  668. tmp = vdd_bit - ilog2(MMC_VDD_165_195);
  669. if (tmp == 0) {
  670. min_uV = 1650 * 1000;
  671. max_uV = 1950 * 1000;
  672. } else {
  673. min_uV = 1900 * 1000 + tmp * 100 * 1000;
  674. max_uV = min_uV + 100 * 1000;
  675. }
  676. /* avoid needless changes to this voltage; the regulator
  677. * might not allow this operation
  678. */
  679. voltage = regulator_get_voltage(supply);
  680. if (voltage < 0)
  681. result = voltage;
  682. else if (voltage < min_uV || voltage > max_uV)
  683. result = regulator_set_voltage(supply, min_uV, max_uV);
  684. else
  685. result = 0;
  686. if (result == 0 && !enabled)
  687. result = regulator_enable(supply);
  688. } else if (enabled) {
  689. result = regulator_disable(supply);
  690. }
  691. return result;
  692. }
  693. EXPORT_SYMBOL(mmc_regulator_set_ocr);
  694. #endif
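/*
 * Usage sketch (CONFIG_REGULATOR): a host driver backed by a regulator can
 * forward the core's voltage requests from its set_ios() method.  "my_priv",
 * its "vcc" regulator and my_set_ios() are hypothetical.
 *
 *	static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		struct my_priv *priv = mmc_priv(mmc);
 *
 *		mmc_regulator_set_ocr(priv->vcc, ios->vdd);
 *	}
 *
 * where priv->vcc would typically have been obtained at probe time with
 * regulator_get() and its OCR mask reported via mmc_regulator_get_ocrmask().
 */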
  695. /*
  696. * Mask off any voltages we don't support and select
  697. * the lowest voltage
  698. */
  699. u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
  700. {
  701. int bit;
  702. ocr &= host->ocr_avail;
  703. bit = ffs(ocr);
  704. if (bit) {
  705. bit -= 1;
  706. ocr &= 3 << bit;
  707. host->ios.vdd = bit;
  708. mmc_set_ios(host);
  709. } else {
  710. pr_warning("%s: host doesn't support card's voltages\n",
  711. mmc_hostname(host));
  712. ocr = 0;
  713. }
  714. return ocr;
  715. }
  716. /*
  717. * Select timing parameters for host.
  718. */
  719. void mmc_set_timing(struct mmc_host *host, unsigned int timing)
  720. {
  721. host->ios.timing = timing;
  722. mmc_set_ios(host);
  723. }
  724. /*
  725. * Apply power to the MMC stack. This is a two-stage process.
  726. * First, we enable power to the card without the clock running.
  727. * We then wait a bit for the power to stabilise. Finally,
  728. * enable the bus drivers and clock to the card.
  729. *
  730. * We must _NOT_ enable the clock prior to power stabilising.
  731. *
  732. * If a host does all the power sequencing itself, ignore the
  733. * initial MMC_POWER_UP stage.
  734. */
  735. static void mmc_power_up(struct mmc_host *host)
  736. {
  737. int bit;
  738. /* If ocr is set, we use it */
  739. if (host->ocr)
  740. bit = ffs(host->ocr) - 1;
  741. else
  742. bit = fls(host->ocr_avail) - 1;
  743. host->ios.vdd = bit;
  744. if (mmc_host_is_spi(host)) {
  745. host->ios.chip_select = MMC_CS_HIGH;
  746. host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
  747. } else {
  748. host->ios.chip_select = MMC_CS_DONTCARE;
  749. host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
  750. }
  751. host->ios.power_mode = MMC_POWER_UP;
  752. host->ios.bus_width = MMC_BUS_WIDTH_1;
  753. host->ios.timing = MMC_TIMING_LEGACY;
  754. mmc_set_ios(host);
  755. /*
  756. * This delay should be sufficient to allow the power supply
  757. * to reach the minimum voltage.
  758. */
  759. mmc_delay(10);
  760. if (host->f_min > 400000) {
  761. pr_warning("%s: Minimum clock frequency too high for "
  762. "identification mode\n", mmc_hostname(host));
  763. host->ios.clock = host->f_min;
  764. } else
  765. host->ios.clock = 400000;
  766. host->ios.power_mode = MMC_POWER_ON;
  767. mmc_set_ios(host);
  768. /*
  769. * This delay must be at least 74 clock cycles, or 1 ms, or the
  770. * time required to reach a stable voltage.
  771. */
  772. mmc_delay(10);
  773. }
  774. static void mmc_power_off(struct mmc_host *host)
  775. {
  776. host->ios.clock = 0;
  777. host->ios.vdd = 0;
  778. if (!mmc_host_is_spi(host)) {
  779. host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
  780. host->ios.chip_select = MMC_CS_DONTCARE;
  781. }
  782. host->ios.power_mode = MMC_POWER_OFF;
  783. host->ios.bus_width = MMC_BUS_WIDTH_1;
  784. host->ios.timing = MMC_TIMING_LEGACY;
  785. mmc_set_ios(host);
  786. }
  787. /*
  788. * Cleanup when the last reference to the bus operator is dropped.
  789. */
  790. static void __mmc_release_bus(struct mmc_host *host)
  791. {
  792. BUG_ON(!host);
  793. BUG_ON(host->bus_refs);
  794. BUG_ON(!host->bus_dead);
  795. host->bus_ops = NULL;
  796. }
  797. /*
  798. * Increase reference count of bus operator
  799. */
  800. static inline void mmc_bus_get(struct mmc_host *host)
  801. {
  802. unsigned long flags;
  803. spin_lock_irqsave(&host->lock, flags);
  804. host->bus_refs++;
  805. spin_unlock_irqrestore(&host->lock, flags);
  806. }
  807. /*
  808. * Decrease reference count of bus operator and free it if
  809. * it is the last reference.
  810. */
  811. static inline void mmc_bus_put(struct mmc_host *host)
  812. {
  813. unsigned long flags;
  814. spin_lock_irqsave(&host->lock, flags);
  815. host->bus_refs--;
  816. if ((host->bus_refs == 0) && host->bus_ops)
  817. __mmc_release_bus(host);
  818. spin_unlock_irqrestore(&host->lock, flags);
  819. }
  820. /*
  821. * Assign an mmc bus handler to a host. Only one bus handler may control a
  822. * host at any given time.
  823. */
  824. void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
  825. {
  826. unsigned long flags;
  827. BUG_ON(!host);
  828. BUG_ON(!ops);
  829. WARN_ON(!host->claimed);
  830. spin_lock_irqsave(&host->lock, flags);
  831. BUG_ON(host->bus_ops);
  832. BUG_ON(host->bus_refs);
  833. host->bus_ops = ops;
  834. host->bus_refs = 1;
  835. host->bus_dead = 0;
  836. spin_unlock_irqrestore(&host->lock, flags);
  837. }
  838. /*
  839. * Remove the current bus handler from a host. Assumes that there are
  840. * no interesting cards left, so the bus is powered down.
  841. */
  842. void mmc_detach_bus(struct mmc_host *host)
  843. {
  844. unsigned long flags;
  845. BUG_ON(!host);
  846. WARN_ON(!host->claimed);
  847. WARN_ON(!host->bus_ops);
  848. spin_lock_irqsave(&host->lock, flags);
  849. host->bus_dead = 1;
  850. spin_unlock_irqrestore(&host->lock, flags);
  851. mmc_power_off(host);
  852. mmc_bus_put(host);
  853. }
  854. /**
  855. * mmc_detect_change - process change of state on an MMC socket
  856. * @host: host which changed state.
  857. * @delay: optional delay to wait before detection (jiffies)
  858. *
  859. * MMC drivers should call this when they detect a card has been
  860. * inserted or removed. The MMC layer will confirm that any
  861. * present card is still functional, and initialize any newly
  862. * inserted.
  863. */
  864. void mmc_detect_change(struct mmc_host *host, unsigned long delay)
  865. {
  866. #ifdef CONFIG_MMC_DEBUG
  867. unsigned long flags;
  868. spin_lock_irqsave(&host->lock, flags);
  869. WARN_ON(host->removed);
  870. spin_unlock_irqrestore(&host->lock, flags);
  871. #endif
  872. mmc_schedule_delayed_work(&host->detect, delay);
  873. }
  874. EXPORT_SYMBOL(mmc_detect_change);
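/*
 * Usage sketch: a card-detect GPIO interrupt handler normally just kicks the
 * core with a debounce delay and lets mmc_rescan() do the rest.  The handler
 * name and the 200 ms debounce are illustrative.
 *
 *	static irqreturn_t my_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */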
  875. void mmc_rescan(struct work_struct *work)
  876. {
  877. struct mmc_host *host =
  878. container_of(work, struct mmc_host, detect.work);
  879. u32 ocr;
  880. int err;
  881. mmc_bus_get(host);
  882. /* if there is a card registered, check whether it is still present */
  883. if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
  884. host->bus_ops->detect(host);
  885. mmc_bus_put(host);
  886. mmc_bus_get(host);
  887. /* if there still is a card present, stop here */
  888. if (host->bus_ops != NULL) {
  889. mmc_bus_put(host);
  890. goto out;
  891. }
  892. /* detect a newly inserted card */
  893. /*
  894. * Only we can add a new handler, so it's safe to
  895. * release the lock here.
  896. */
  897. mmc_bus_put(host);
  898. if (host->ops->get_cd && host->ops->get_cd(host) == 0)
  899. goto out;
  900. mmc_claim_host(host);
  901. mmc_power_up(host);
  902. mmc_go_idle(host);
  903. mmc_send_if_cond(host, host->ocr_avail);
  904. /*
  905. * First we search for SDIO...
  906. */
  907. err = mmc_send_io_op_cond(host, 0, &ocr);
  908. if (!err) {
  909. if (mmc_attach_sdio(host, ocr))
  910. mmc_power_off(host);
  911. goto out;
  912. }
  913. /*
  914. * ...then normal SD...
  915. */
  916. err = mmc_send_app_op_cond(host, 0, &ocr);
  917. if (!err) {
  918. if (mmc_attach_sd(host, ocr))
  919. mmc_power_off(host);
  920. goto out;
  921. }
  922. /*
  923. * ...and finally MMC.
  924. */
  925. err = mmc_send_op_cond(host, 0, &ocr);
  926. if (!err) {
  927. if (mmc_attach_mmc(host, ocr))
  928. mmc_power_off(host);
  929. goto out;
  930. }
  931. mmc_release_host(host);
  932. mmc_power_off(host);
  933. out:
  934. if (host->caps & MMC_CAP_NEEDS_POLL)
  935. mmc_schedule_delayed_work(&host->detect, HZ);
  936. }
  937. void mmc_start_host(struct mmc_host *host)
  938. {
  939. mmc_power_off(host);
  940. mmc_detect_change(host, 0);
  941. }
  942. void mmc_stop_host(struct mmc_host *host)
  943. {
  944. #ifdef CONFIG_MMC_DEBUG
  945. unsigned long flags;
  946. spin_lock_irqsave(&host->lock, flags);
  947. host->removed = 1;
  948. spin_unlock_irqrestore(&host->lock, flags);
  949. #endif
  950. if (host->caps & MMC_CAP_DISABLE)
  951. cancel_delayed_work(&host->disable);
  952. cancel_delayed_work(&host->detect);
  953. mmc_flush_scheduled_work();
  954. mmc_bus_get(host);
  955. if (host->bus_ops && !host->bus_dead) {
  956. if (host->bus_ops->remove)
  957. host->bus_ops->remove(host);
  958. mmc_claim_host(host);
  959. mmc_detach_bus(host);
  960. mmc_release_host(host);
  961. mmc_bus_put(host);
  962. return;
  963. }
  964. mmc_bus_put(host);
  965. BUG_ON(host->card);
  966. mmc_power_off(host);
  967. }
  968. void mmc_power_save_host(struct mmc_host *host)
  969. {
  970. mmc_bus_get(host);
  971. if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
  972. mmc_bus_put(host);
  973. return;
  974. }
  975. if (host->bus_ops->power_save)
  976. host->bus_ops->power_save(host);
  977. mmc_bus_put(host);
  978. mmc_power_off(host);
  979. }
  980. EXPORT_SYMBOL(mmc_power_save_host);
  981. void mmc_power_restore_host(struct mmc_host *host)
  982. {
  983. mmc_bus_get(host);
  984. if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
  985. mmc_bus_put(host);
  986. return;
  987. }
  988. mmc_power_up(host);
  989. host->bus_ops->power_restore(host);
  990. mmc_bus_put(host);
  991. }
  992. EXPORT_SYMBOL(mmc_power_restore_host);
  993. int mmc_card_awake(struct mmc_host *host)
  994. {
  995. int err = -ENOSYS;
  996. mmc_bus_get(host);
  997. if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
  998. err = host->bus_ops->awake(host);
  999. mmc_bus_put(host);
  1000. return err;
  1001. }
  1002. EXPORT_SYMBOL(mmc_card_awake);
  1003. int mmc_card_sleep(struct mmc_host *host)
  1004. {
  1005. int err = -ENOSYS;
  1006. mmc_bus_get(host);
  1007. if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
  1008. err = host->bus_ops->sleep(host);
  1009. mmc_bus_put(host);
  1010. return err;
  1011. }
  1012. EXPORT_SYMBOL(mmc_card_sleep);
  1013. int mmc_card_can_sleep(struct mmc_host *host)
  1014. {
  1015. struct mmc_card *card = host->card;
  1016. if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
  1017. return 1;
  1018. return 0;
  1019. }
  1020. EXPORT_SYMBOL(mmc_card_can_sleep);
  1021. #ifdef CONFIG_PM
  1022. /**
  1023. * mmc_suspend_host - suspend a host
  1024. * @host: mmc host
  1025. * @state: suspend mode (PM_SUSPEND_xxx)
  1026. */
  1027. int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
  1028. {
  1029. int err = 0;
  1030. if (host->caps & MMC_CAP_DISABLE)
  1031. cancel_delayed_work(&host->disable);
  1032. cancel_delayed_work(&host->detect);
  1033. mmc_flush_scheduled_work();
  1034. mmc_bus_get(host);
  1035. if (host->bus_ops && !host->bus_dead) {
  1036. if (host->bus_ops->suspend)
  1037. err = host->bus_ops->suspend(host);
  1038. if (err == -ENOSYS || !host->bus_ops->resume) {
  1039. /*
  1040. * We simply "remove" the card in this case.
  1041. * It will be redetected on resume.
  1042. */
  1043. if (host->bus_ops->remove)
  1044. host->bus_ops->remove(host);
  1045. mmc_claim_host(host);
  1046. mmc_detach_bus(host);
  1047. mmc_release_host(host);
  1048. err = 0;
  1049. }
  1050. }
  1051. mmc_bus_put(host);
  1052. if (!err)
  1053. mmc_power_off(host);
  1054. return err;
  1055. }
  1056. EXPORT_SYMBOL(mmc_suspend_host);
  1057. /**
  1058. * mmc_resume_host - resume a previously suspended host
  1059. * @host: mmc host
  1060. */
  1061. int mmc_resume_host(struct mmc_host *host)
  1062. {
  1063. int err = 0;
  1064. mmc_bus_get(host);
  1065. if (host->bus_ops && !host->bus_dead) {
  1066. mmc_power_up(host);
  1067. mmc_select_voltage(host, host->ocr);
  1068. BUG_ON(!host->bus_ops->resume);
  1069. err = host->bus_ops->resume(host);
  1070. if (err) {
  1071. printk(KERN_WARNING "%s: error %d during resume "
  1072. "(card was removed?)\n",
  1073. mmc_hostname(host), err);
  1074. if (host->bus_ops->remove)
  1075. host->bus_ops->remove(host);
  1076. mmc_claim_host(host);
  1077. mmc_detach_bus(host);
  1078. mmc_release_host(host);
  1079. /* no need to bother upper layers */
  1080. err = 0;
  1081. }
  1082. }
  1083. mmc_bus_put(host);
  1084. /*
  1085. * We add a slight delay here so that resume can progress
  1086. * in parallel.
  1087. */
  1088. mmc_detect_change(host, 1);
  1089. return err;
  1090. }
  1091. EXPORT_SYMBOL(mmc_resume_host);
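/*
 * Usage sketch: platform host drivers typically hook these into their own
 * suspend/resume callbacks.  my_mmc_suspend(), my_mmc_resume() and the use
 * of a platform_device are illustrative.
 *
 *	static int my_mmc_suspend(struct platform_device *pdev, pm_message_t state)
 *	{
 *		struct mmc_host *mmc = platform_get_drvdata(pdev);
 *
 *		return mmc_suspend_host(mmc, state);
 *	}
 *
 *	static int my_mmc_resume(struct platform_device *pdev)
 *	{
 *		struct mmc_host *mmc = platform_get_drvdata(pdev);
 *
 *		return mmc_resume_host(mmc);
 *	}
 */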
  1092. #endif
  1093. static int __init mmc_init(void)
  1094. {
  1095. int ret;
  1096. workqueue = create_singlethread_workqueue("kmmcd");
  1097. if (!workqueue)
  1098. return -ENOMEM;
  1099. ret = mmc_register_bus();
  1100. if (ret)
  1101. goto destroy_workqueue;
  1102. ret = mmc_register_host_class();
  1103. if (ret)
  1104. goto unregister_bus;
  1105. ret = sdio_register_bus();
  1106. if (ret)
  1107. goto unregister_host_class;
  1108. return 0;
  1109. unregister_host_class:
  1110. mmc_unregister_host_class();
  1111. unregister_bus:
  1112. mmc_unregister_bus();
  1113. destroy_workqueue:
  1114. destroy_workqueue(workqueue);
  1115. return ret;
  1116. }
  1117. static void __exit mmc_exit(void)
  1118. {
  1119. sdio_unregister_bus();
  1120. mmc_unregister_host_class();
  1121. mmc_unregister_bus();
  1122. destroy_workqueue(workqueue);
  1123. }
  1124. subsys_initcall(mmc_init);
  1125. module_exit(mmc_exit);
  1126. MODULE_LICENSE("GPL");