mmc_ops.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628
/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"
  20. static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
  21. {
  22. int err;
  23. struct mmc_command cmd = {0};
  24. BUG_ON(!host);
  25. cmd.opcode = MMC_SELECT_CARD;
  26. if (card) {
  27. cmd.arg = card->rca << 16;
  28. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  29. } else {
  30. cmd.arg = 0;
  31. cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
  32. }
  33. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  34. if (err)
  35. return err;
  36. return 0;
  37. }
/* Select @card on its host (CMD7 with the card's RCA). */
int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}
/* Deselect all cards on @host (CMD7 with argument 0, no response). */
int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
/*
 * Move the card between Sleep and Standby state via CMD5 (SLEEP_AWAKE).
 * @sleep: non-zero puts the card to sleep, zero wakes it up.
 *
 * The card is deselected before sleeping and re-selected after waking.
 */
int mmc_card_sleepawake(struct mmc_host *host, int sleep)
{
	struct mmc_command cmd = {0};
	struct mmc_card *card = host->card;
	int err;

	if (sleep)
		mmc_deselect_cards(host);

	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	if (sleep)
		cmd.arg |= 1 << 15;	/* bit 15 selects Sleep (vs. Awake) */

	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	/*
	 * If the host does not wait while the card signals busy, then we
	 * will have to wait the sleep/awake timeout. Note, we cannot use the
	 * SEND_STATUS command to poll the status because that command (and most
	 * others) is invalid while the card sleeps.
	 */
	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
		/* presumably sa_timeout is in 100ns units so /10000 yields
		 * ms — confirm against the EXT_CSD parsing code */
		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));

	if (!sleep)
		err = mmc_select_card(card);

	return err;
}
/* Reset the card to idle state with CMD0 (GO_IDLE_STATE). */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* card CRC is off after reset; mmc_spi_set_crc() re-enables it */
	host->use_spi_crc = 0;

	return err;
}
  103. int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
  104. {
  105. struct mmc_command cmd = {0};
  106. int i, err = 0;
  107. BUG_ON(!host);
  108. cmd.opcode = MMC_SEND_OP_COND;
  109. cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
  110. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
  111. for (i = 100; i; i--) {
  112. err = mmc_wait_for_cmd(host, &cmd, 0);
  113. if (err)
  114. break;
  115. /* if we're just probing, do a single pass */
  116. if (ocr == 0)
  117. break;
  118. /* otherwise wait until reset completes */
  119. if (mmc_host_is_spi(host)) {
  120. if (!(cmd.resp[0] & R1_SPI_IDLE))
  121. break;
  122. } else {
  123. if (cmd.resp[0] & MMC_CARD_BUSY)
  124. break;
  125. }
  126. err = -ETIMEDOUT;
  127. mmc_delay(10);
  128. }
  129. if (rocr && !mmc_host_is_spi(host))
  130. *rocr = cmd.resp[0];
  131. return err;
  132. }
  133. int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
  134. {
  135. int err;
  136. struct mmc_command cmd = {0};
  137. BUG_ON(!host);
  138. BUG_ON(!cid);
  139. cmd.opcode = MMC_ALL_SEND_CID;
  140. cmd.arg = 0;
  141. cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
  142. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  143. if (err)
  144. return err;
  145. memcpy(cid, cmd.resp, sizeof(u32) * 4);
  146. return 0;
  147. }
  148. int mmc_set_relative_addr(struct mmc_card *card)
  149. {
  150. int err;
  151. struct mmc_command cmd = {0};
  152. BUG_ON(!card);
  153. BUG_ON(!card->host);
  154. cmd.opcode = MMC_SET_RELATIVE_ADDR;
  155. cmd.arg = card->rca << 16;
  156. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  157. err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
  158. if (err)
  159. return err;
  160. return 0;
  161. }
  162. static int
  163. mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
  164. {
  165. int err;
  166. struct mmc_command cmd = {0};
  167. BUG_ON(!host);
  168. BUG_ON(!cxd);
  169. cmd.opcode = opcode;
  170. cmd.arg = arg;
  171. cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
  172. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  173. if (err)
  174. return err;
  175. memcpy(cxd, cmd.resp, sizeof(u32) * 4);
  176. return 0;
  177. }
/*
 * Read a register delivered as a data block (e.g. EXT_CSD, or CSD/CID in
 * SPI mode): issue @opcode as a single-block read of @len bytes into @buf.
 * @card is only used to compute the data timeout for non-CSD/CID opcodes
 * (and may be NULL for those — see mmc_send_cid()'s SPI path).
 *
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	int is_on_stack;

	is_on_stack = object_is_on_stack(buf);
	if (is_on_stack) {
		/*
		 * dma onto stack is unsafe/nonportable, but callers to this
		 * routine normally provide temporary on-stack buffers ...
		 */
		data_buf = kmalloc(len, GFP_KERNEL);
		if (!data_buf)
			return -ENOMEM;
	} else
		data_buf = buf;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSR and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (is_on_stack) {
		/* copied back even on error; callers check the return code */
		memcpy(buf, data_buf, len);
		kfree(data_buf);
	}

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
  239. int mmc_send_csd(struct mmc_card *card, u32 *csd)
  240. {
  241. int ret, i;
  242. u32 *csd_tmp;
  243. if (!mmc_host_is_spi(card->host))
  244. return mmc_send_cxd_native(card->host, card->rca << 16,
  245. csd, MMC_SEND_CSD);
  246. csd_tmp = kmalloc(16, GFP_KERNEL);
  247. if (!csd_tmp)
  248. return -ENOMEM;
  249. ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
  250. if (ret)
  251. goto err;
  252. for (i = 0;i < 4;i++)
  253. csd[i] = be32_to_cpu(csd_tmp[i]);
  254. err:
  255. kfree(csd_tmp);
  256. return ret;
  257. }
  258. int mmc_send_cid(struct mmc_host *host, u32 *cid)
  259. {
  260. int ret, i;
  261. u32 *cid_tmp;
  262. if (!mmc_host_is_spi(host)) {
  263. if (!host->card)
  264. return -EINVAL;
  265. return mmc_send_cxd_native(host, host->card->rca << 16,
  266. cid, MMC_SEND_CID);
  267. }
  268. cid_tmp = kmalloc(16, GFP_KERNEL);
  269. if (!cid_tmp)
  270. return -ENOMEM;
  271. ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
  272. if (ret)
  273. goto err;
  274. for (i = 0;i < 4;i++)
  275. cid[i] = be32_to_cpu(cid_tmp[i]);
  276. err:
  277. kfree(cid_tmp);
  278. return ret;
  279. }
/* Read the 512-byte EXT_CSD register into @ext_csd as a data block. */
int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
			ext_csd, 512);
}
/*
 * SPI mode: read the OCR register (READ_OCR).
 * @highcap: set bit 30 of the argument to probe high-capacity addressing.
 *
 * Note: *@ocrp is written unconditionally; it is only meaningful when
 * the return value is 0.
 */
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}
  296. int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
  297. {
  298. struct mmc_command cmd = {0};
  299. int err;
  300. cmd.opcode = MMC_SPI_CRC_ON_OFF;
  301. cmd.flags = MMC_RSP_SPI_R1;
  302. cmd.arg = use_crc;
  303. err = mmc_wait_for_cmd(host, &cmd, 0);
  304. if (!err)
  305. host->use_spi_crc = use_crc;
  306. return err;
  307. }
  308. /**
  309. * __mmc_switch - modify EXT_CSD register
  310. * @card: the MMC card associated with the data transfer
  311. * @set: cmd set values
  312. * @index: EXT_CSD register index
  313. * @value: value to program into EXT_CSD register
  314. * @timeout_ms: timeout (ms) for operation performed by register write,
  315. * timeout of zero implies maximum possible timeout
  316. * @use_busy_signal: use the busy signal as response type
  317. *
  318. * Modifies the EXT_CSD register for selected card.
  319. */
  320. int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
  321. unsigned int timeout_ms, bool use_busy_signal)
  322. {
  323. int err;
  324. struct mmc_command cmd = {0};
  325. u32 status;
  326. BUG_ON(!card);
  327. BUG_ON(!card->host);
  328. cmd.opcode = MMC_SWITCH;
  329. cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
  330. (index << 16) |
  331. (value << 8) |
  332. set;
  333. cmd.flags = MMC_CMD_AC;
  334. if (use_busy_signal)
  335. cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
  336. else
  337. cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
  338. cmd.cmd_timeout_ms = timeout_ms;
  339. err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
  340. if (err)
  341. return err;
  342. /* No need to check card status in case of unblocking command */
  343. if (!use_busy_signal)
  344. return 0;
  345. /* Must check status to be sure of no errors */
  346. do {
  347. err = mmc_send_status(card, &status);
  348. if (err)
  349. return err;
  350. if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
  351. break;
  352. if (mmc_host_is_spi(card->host))
  353. break;
  354. } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
  355. if (mmc_host_is_spi(card->host)) {
  356. if (status & R1_SPI_ILLEGAL_COMMAND)
  357. return -EBADMSG;
  358. } else {
  359. if (status & 0xFDFFA000)
  360. pr_warning("%s: unexpected status %#x after "
  361. "switch", mmc_hostname(card->host), status);
  362. if (status & R1_SWITCH_ERROR)
  363. return -EBADMSG;
  364. }
  365. return 0;
  366. }
  367. EXPORT_SYMBOL_GPL(__mmc_switch);
/* Like __mmc_switch() but always waits on the busy signal. */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, true);
}
EXPORT_SYMBOL_GPL(mmc_switch);
/*
 * CMD13 (SEND_STATUS): fetch the card's status register into *@status
 * (if non-NULL).  Non-SPI hosts address the card by RCA.
 */
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
/*
 * Run one half of the bus-test handshake: BUS_TEST_W writes a known
 * pattern, BUS_TEST_R reads back the card's response and compares it
 * against the inverse of the pattern.  @len is the bus width in bytes
 * (8 or 4); any other value is rejected with -EINVAL.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * XOR with 0xff: the card is expected to echo the inverse
		 * of the written pattern.  NOTE(review): only the first
		 * len/4 bytes are compared — looks deliberate (trailing
		 * pattern bytes are zero) but worth confirming against the
		 * bus-test section of the spec.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
  460. int mmc_bus_test(struct mmc_card *card, u8 bus_width)
  461. {
  462. int err, width;
  463. if (bus_width == MMC_BUS_WIDTH_8)
  464. width = 8;
  465. else if (bus_width == MMC_BUS_WIDTH_4)
  466. width = 4;
  467. else if (bus_width == MMC_BUS_WIDTH_1)
  468. return 0; /* no need for test */
  469. else
  470. return -EINVAL;
  471. /*
  472. * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
  473. * is a problem. This improves chances that the test will work.
  474. */
  475. mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
  476. err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
  477. return err;
  478. }
  479. int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
  480. {
  481. struct mmc_command cmd = {0};
  482. unsigned int opcode;
  483. int err;
  484. if (!card->ext_csd.hpi) {
  485. pr_warning("%s: Card didn't support HPI command\n",
  486. mmc_hostname(card->host));
  487. return -EINVAL;
  488. }
  489. opcode = card->ext_csd.hpi_cmd;
  490. if (opcode == MMC_STOP_TRANSMISSION)
  491. cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
  492. else if (opcode == MMC_SEND_STATUS)
  493. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  494. cmd.opcode = opcode;
  495. cmd.arg = card->rca << 16 | 1;
  496. err = mmc_wait_for_cmd(card->host, &cmd, 0);
  497. if (err) {
  498. pr_warn("%s: error %d interrupting operation. "
  499. "HPI command response %#x\n", mmc_hostname(card->host),
  500. err, cmd.resp[0]);
  501. return err;
  502. }
  503. if (status)
  504. *status = cmd.resp[0];
  505. return 0;
  506. }