mmc_ops.c

/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

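/*
 * Send SELECT/DESELECT_CARD (CMD7).  A non-NULL card is selected by its
 * RCA and expects an R1 response; a NULL card deselects whatever is
 * currently selected (argument 0, no response).
 */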
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

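/* Select the given card on its host bus via CMD7. */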
int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}

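/* Deselect all cards on the bus (CMD7 with a zero argument). */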
int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

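/*
 * Move the card between Standby and Sleep states with SLEEP_AWAKE (CMD5).
 * Entering sleep requires the card to be deselected first; waking it up
 * re-selects it.  If the host cannot wait on the busy signal, the
 * EXT_CSD sleep/awake timeout (sa_timeout, in 100ns units) is waited out
 * explicitly instead.
 */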
int mmc_card_sleepawake(struct mmc_host *host, int sleep)
{
	struct mmc_command cmd = {0};
	struct mmc_card *card = host->card;
	int err;

	if (sleep)
		mmc_deselect_cards(host);

	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	if (sleep)
		cmd.arg |= 1 << 15;

	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	/*
	 * If the host does not wait while the card signals busy, then we
	 * will have to wait the sleep/awake timeout.  Note, we cannot use
	 * the SEND_STATUS command to poll the status because that command
	 * (and most others) is invalid while the card sleeps.
	 */
	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));

	if (!sleep)
		err = mmc_select_card(card);

	return err;
}

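/*
 * Reset the card to the idle state with GO_IDLE_STATE (CMD0) and clear
 * the host's use_spi_crc flag to match the freshly reset card.
 */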
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

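/*
 * Negotiate the operating conditions with SEND_OP_COND (CMD1).  With a
 * zero OCR this is a single probe pass; otherwise the command is retried
 * (up to 100 times, 10ms apart) until the card reports that its power-up
 * sequence has completed.
 */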
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

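/* Broadcast ALL_SEND_CID (CMD2) and copy the 128-bit CID from the R2 response. */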
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cid);

	cmd.opcode = MMC_ALL_SEND_CID;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}

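/* Assign the card its relative card address (RCA) with SET_RELATIVE_ADDR (CMD3). */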
int mmc_set_relative_addr(struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

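/*
 * Read a CSD or CID register with a native (non-SPI) command: the card
 * answers with an R2 response carrying the full 128-bit register.
 */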
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cxd);

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

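/*
 * Read a CSD, CID or EXT_CSD register as a single data block of 'len'
 * bytes, as needed in SPI mode and for EXT_CSD reads.
 */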
/*
 * NOTE: the caller must provide either a DMA-capable buffer or an
 * on-stack buffer (the latter costs an extra allocation and copy here).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	int is_on_stack;

	is_on_stack = object_is_on_stack(buf);
	if (is_on_stack) {
		/*
		 * dma onto stack is unsafe/nonportable, but callers to this
		 * routine normally provide temporary on-stack buffers ...
		 */
		data_buf = kmalloc(len, GFP_KERNEL);
		if (!data_buf)
			return -ENOMEM;
	} else
		data_buf = buf;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (is_on_stack) {
		memcpy(buf, data_buf, len);
		kfree(data_buf);
	}

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

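/*
 * Fetch the card's CSD.  Non-SPI hosts use the native R2 command; SPI
 * hosts read the 16 CSD bytes as a data block and convert them from
 * big-endian to host order.
 */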
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	u32 *csd_tmp;

	if (!mmc_host_is_spi(card->host))
		return mmc_send_cxd_native(card->host, card->rca << 16,
				csd, MMC_SEND_CSD);

	csd_tmp = kmalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

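/*
 * Fetch the card's CID.  Non-SPI hosts use the native R2 command (which
 * needs an attached card for its RCA); SPI hosts read the CID as a
 * 16-byte data block and byte-swap it to host order.
 */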
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	u32 *cid_tmp;

	if (!mmc_host_is_spi(host)) {
		if (!host->card)
			return -EINVAL;
		return mmc_send_cxd_native(host, host->card->rca << 16,
				cid, MMC_SEND_CID);
	}

	cid_tmp = kmalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

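/* Read the 512-byte EXT_CSD register as a single data block. */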
int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
			ext_csd, 512);
}

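/*
 * Read the OCR register over SPI.  'highcap' sets bit 30 of the command
 * argument, which callers use when probing high-capacity cards.
 */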
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

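/*
 * Turn SPI CRC checking on or off and remember the setting in the host
 * so subsequent commands are framed accordingly.
 */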
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write;
 *	a timeout of zero implies the maximum possible timeout
 * @use_busy_signal: use the busy signal as response type
 *
 * Modifies the EXT_CSD register for the selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		 unsigned int timeout_ms, bool use_busy_signal)
{
	int err;
	struct mmc_command cmd = {0};
	unsigned long timeout;
	u32 status;

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_busy_signal)
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	else
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;

	cmd.cmd_timeout_ms = timeout_ms;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		return 0;

	/* Must check status to be sure of no errors */
	timeout = jiffies + msecs_to_jiffies(MMC_OPS_TIMEOUT_MS);
	do {
		err = mmc_send_status(card, &status);
		if (err)
			return err;
		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
			break;
		if (mmc_host_is_spi(card->host))
			break;

		/* Timeout if the device never leaves the program state. */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			return -ETIMEDOUT;
		}
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(card->host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warning("%s: unexpected status %#x after switch\n",
				   mmc_hostname(card->host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_switch);

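/*
 * Write one EXT_CSD byte and wait for the card to leave the programming
 * state: __mmc_switch() with use_busy_signal set.
 */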
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, true);
}
EXPORT_SYMBOL_GPL(mmc_switch);

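/* Query the card status register with SEND_STATUS (CMD13). */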
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}

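/*
 * Run one half of the bus test: BUS_TEST_W writes a fixed test pattern,
 * BUS_TEST_R reads back what the card sampled.  On a good bus the card
 * returns the bitwise complement of the written pattern, which is what
 * the read path checks for.
 */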
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

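/*
 * Verify the data bus width by writing a test pattern (BUS_TEST_W) and
 * reading it back (BUS_TEST_R).  A 1-bit bus needs no test.
 */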
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int err, width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
	return err;
}

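/*
 * Issue a High Priority Interrupt (HPI) to interrupt an ongoing card
 * operation.  The command used (STOP_TRANSMISSION or SEND_STATUS) is the
 * one the card advertises in EXT_CSD, with the HPI bit set in bit 0 of
 * the argument.
 */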
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warning("%s: Card doesn't support HPI command\n",
			   mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}