/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);

        cmd.opcode = MMC_SELECT_CARD;

        if (card) {
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.arg = 0;
                cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
        }

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}

int mmc_select_card(struct mmc_card *card)
{
        BUG_ON(!card);

        return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
        return _mmc_select_card(host, NULL);
}

int mmc_card_sleepawake(struct mmc_host *host, int sleep)
{
        struct mmc_command cmd = {0};
        struct mmc_card *card = host->card;
        int err;

        if (sleep)
                mmc_deselect_cards(host);

        cmd.opcode = MMC_SLEEP_AWAKE;
        cmd.arg = card->rca << 16;
        if (sleep)
                cmd.arg |= 1 << 15;
        cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (err)
                return err;

        /*
         * If the host does not wait while the card signals busy, then we
         * will have to wait the sleep/awake timeout. Note, we cannot use
         * the SEND_STATUS command to poll the status because that command
         * (and most others) is invalid while the card sleeps.
         */
        if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
                mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));

        if (!sleep)
                err = mmc_select_card(card);

        return err;
}

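/*
 * Illustrative sketch, not part of the original driver: a suspend/resume
 * path would simply flip the sleep argument of mmc_card_sleepawake()
 * above. The helper name and calling context are assumptions made here
 * for illustration only.
 */
static int __maybe_unused example_sleep_then_awake(struct mmc_host *host)
{
        int err;

        /* CMD5 with the sleep bit set; the card is deselected first */
        err = mmc_card_sleepawake(host, 1);
        if (err)
                return err;

        /* CMD5 again to wake the card, which then gets reselected */
        return mmc_card_sleepawake(host, 0);
}
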
int mmc_go_idle(struct mmc_host *host)
{
        int err;
        struct mmc_command cmd = {0};

        /*
         * Non-SPI hosts need to prevent chipselect going active during
         * GO_IDLE; that would put chips into SPI mode. Remind them of
         * that in case of hardware that won't pull up DAT3/nCS otherwise.
         *
         * SPI hosts ignore ios.chip_select; it's managed according to
         * rules that must accommodate non-MMC slaves which this layer
         * won't even know about.
         */
        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_HIGH);
                mmc_delay(1);
        }

        cmd.opcode = MMC_GO_IDLE_STATE;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        mmc_delay(1);

        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_DONTCARE);
                mmc_delay(1);
        }

        host->use_spi_crc = 0;

        return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
        struct mmc_command cmd = {0};
        int i, err = 0;

        BUG_ON(!host);

        cmd.opcode = MMC_SEND_OP_COND;
        cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

        for (i = 100; i; i--) {
                err = mmc_wait_for_cmd(host, &cmd, 0);
                if (err)
                        break;

                /* if we're just probing, do a single pass */
                if (ocr == 0)
                        break;

                /* otherwise wait until reset completes */
                if (mmc_host_is_spi(host)) {
                        if (!(cmd.resp[0] & R1_SPI_IDLE))
                                break;
                } else {
                        if (cmd.resp[0] & MMC_CARD_BUSY)
                                break;
                }

                err = -ETIMEDOUT;

                mmc_delay(10);
        }

        if (rocr && !mmc_host_is_spi(host))
                *rocr = cmd.resp[0];

        return err;
}

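/*
 * Illustrative sketch, not part of the original driver: the attach path
 * typically probes the OCR with ocr == 0 (a single pass above), narrows it
 * to a voltage window the host supports, and only then runs the full CMD1
 * loop. mmc_select_voltage() comes from core.h; the sector-mode bit
 * (1 << 30) and the overall flow are assumptions for illustration.
 */
static int __maybe_unused example_probe_op_cond(struct mmc_host *host)
{
        u32 ocr;
        int err;

        err = mmc_send_op_cond(host, 0, &ocr);  /* probing pass only */
        if (err)
                return err;

        ocr = mmc_select_voltage(host, ocr);    /* pick a supported window */
        if (!ocr)
                return -EINVAL;

        /* request sector addressing and wait for the reset to complete */
        return mmc_send_op_cond(host, ocr | (1 << 30), &ocr);
}
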
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);
        BUG_ON(!cid);

        cmd.opcode = MMC_ALL_SEND_CID;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cid, cmd.resp, sizeof(u32) * 4);

        return 0;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!card);
        BUG_ON(!card->host);

        cmd.opcode = MMC_SET_RELATIVE_ADDR;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);
        BUG_ON(!cxd);

        cmd.opcode = opcode;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cxd, cmd.resp, sizeof(u32) * 4);

        return 0;
}

static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
                u32 opcode, void *buf, unsigned len)
{
        struct mmc_request mrq;
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;
        void *data_buf;

        /* dma onto stack is unsafe/nonportable, but callers to this
         * routine normally provide temporary on-stack buffers ...
         */
        data_buf = kmalloc(len, GFP_KERNEL);
        if (data_buf == NULL)
                return -ENOMEM;

        memset(&mrq, 0, sizeof(struct mmc_request));

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID. Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, data_buf, len);

        if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
                /*
                 * The spec states that CSD and CID accesses have a timeout
                 * of 64 clock cycles.
                 */
                data.timeout_ns = 0;
                data.timeout_clks = 64;
        } else
                mmc_set_data_timeout(&data, card);

        mmc_wait_for_req(host, &mrq);

        memcpy(buf, data_buf, len);
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return 0;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
        int ret, i;

        if (!mmc_host_is_spi(card->host))
                return mmc_send_cxd_native(card->host, card->rca << 16,
                                csd, MMC_SEND_CSD);

        ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++)
                csd[i] = be32_to_cpu(csd[i]);

        return 0;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
        int ret, i;

        if (!mmc_host_is_spi(host)) {
                if (!host->card)
                        return -EINVAL;
                return mmc_send_cxd_native(host, host->card->rca << 16,
                                cid, MMC_SEND_CID);
        }

        ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++)
                cid[i] = be32_to_cpu(cid[i]);

        return 0;
}

int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
        return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
                        ext_csd, 512);
}

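/*
 * Illustrative sketch, not part of the original driver: EXT_CSD is always
 * 512 bytes, so callers allocate a DMA-safe buffer instead of reading onto
 * the stack, mirroring the bounce-buffer note in mmc_send_cxd_data(). The
 * helper name and the EXT_CSD_REV debug print are illustration only.
 */
static int __maybe_unused example_read_ext_csd(struct mmc_card *card)
{
        u8 *ext_csd;
        int err;

        ext_csd = kmalloc(512, GFP_KERNEL);
        if (!ext_csd)
                return -ENOMEM;

        err = mmc_send_ext_csd(card, ext_csd);
        if (!err)
                pr_debug("%s: EXT_CSD rev %u\n",
                         mmc_hostname(card->host), ext_csd[EXT_CSD_REV]);

        kfree(ext_csd);
        return err;
}
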
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SPI_READ_OCR;
        cmd.arg = highcap ? (1 << 30) : 0;
        cmd.flags = MMC_RSP_SPI_R3;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        *ocrp = cmd.resp[1];
        return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SPI_CRC_ON_OFF;
        cmd.flags = MMC_RSP_SPI_R1;
        cmd.arg = use_crc;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (!err)
                host->use_spi_crc = use_crc;
        return err;
}

/**
 * mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *              timeout of zero implies maximum possible timeout
 *
 * Modifies the EXT_CSD register for selected card.
 */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
               unsigned int timeout_ms)
{
        int err;
        struct mmc_command cmd = {0};
        u32 status;

        BUG_ON(!card);
        BUG_ON(!card->host);

        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                  (index << 16) |
                  (value << 8) |
                  set;
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
        cmd.cmd_timeout_ms = timeout_ms;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        /* Must check status to be sure of no errors */
        do {
                err = mmc_send_status(card, &status);
                if (err)
                        return err;
                if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
                        break;
                if (mmc_host_is_spi(card->host))
                        break;
        } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

        if (mmc_host_is_spi(card->host)) {
                if (status & R1_SPI_ILLEGAL_COMMAND)
                        return -EBADMSG;
        } else {
                if (status & 0xFDFFA000)
                        printk(KERN_WARNING "%s: unexpected status %#x after "
                               "switch\n", mmc_hostname(card->host), status);
                if (status & R1_SWITCH_ERROR)
                        return -EBADMSG;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mmc_switch);

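/*
 * Illustrative sketch, not part of the original driver: a typical use of
 * mmc_switch() is programming the bus width byte of EXT_CSD. The constants
 * come from <linux/mmc/mmc.h>; a timeout of 0 requests the maximum possible
 * timeout, as the kernel-doc above notes. The helper name is illustrative.
 */
static int __maybe_unused example_switch_to_4bit(struct mmc_card *card)
{
        return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
                          EXT_CSD_BUS_WIDTH_4, 0);
}
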
int mmc_send_status(struct mmc_card *card, u32 *status)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!card);
        BUG_ON(!card->host);

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        /* NOTE: callers are required to understand the difference
         * between "native" and SPI format status words!
         */
        if (status)
                *status = cmd.resp[0];

        return 0;
}

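/*
 * Illustrative sketch, not part of the original driver: decoding a *native*
 * R1 status word returned by mmc_send_status(). SPI hosts report a different
 * status layout, which is what the NOTE above warns about; this helper and
 * its debug output exist only for illustration.
 */
static void __maybe_unused example_decode_native_status(struct mmc_card *card)
{
        u32 status;

        if (mmc_host_is_spi(card->host) || mmc_send_status(card, &status))
                return;

        pr_debug("%s: current state %u, switch error %s\n",
                 mmc_hostname(card->host), R1_CURRENT_STATE(status),
                 (status & R1_SWITCH_ERROR) ? "yes" : "no");
}
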
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
                  u8 len)
{
        struct mmc_request mrq;
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;
        u8 *data_buf;
        u8 *test_buf;
        int i, err;
        static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
        static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

        /* dma onto stack is unsafe/nonportable, but callers to this
         * routine normally provide temporary on-stack buffers ...
         */
        data_buf = kmalloc(len, GFP_KERNEL);
        if (!data_buf)
                return -ENOMEM;

        if (len == 8)
                test_buf = testdata_8bit;
        else if (len == 4)
                test_buf = testdata_4bit;
        else {
                printk(KERN_ERR "%s: Invalid bus_width %d\n",
                       mmc_hostname(host), len);
                kfree(data_buf);
                return -EINVAL;
        }

        if (opcode == MMC_BUS_TEST_W)
                memcpy(data_buf, test_buf, len);

        memset(&mrq, 0, sizeof(struct mmc_request));

        mrq.cmd = &cmd;
        mrq.data = &data;
        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID. Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        if (opcode == MMC_BUS_TEST_R)
                data.flags = MMC_DATA_READ;
        else
                data.flags = MMC_DATA_WRITE;

        data.sg = &sg;
        data.sg_len = 1;
        sg_init_one(&sg, data_buf, len);

        mmc_wait_for_req(host, &mrq);
        err = 0;
        if (opcode == MMC_BUS_TEST_R) {
                for (i = 0; i < len / 4; i++)
                        if ((test_buf[i] ^ data_buf[i]) != 0xff) {
                                err = -EIO;
                                break;
                        }
        }
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
        int err, width;

        if (bus_width == MMC_BUS_WIDTH_8)
                width = 8;
        else if (bus_width == MMC_BUS_WIDTH_4)
                width = 4;
        else if (bus_width == MMC_BUS_WIDTH_1)
                return 0; /* no need for test */
        else
                return -EINVAL;

        /*
         * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
         * is a problem. This improves chances that the test will work.
         */
        mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
        err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
        return err;
}
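
/*
 * Illustrative sketch, not part of the original driver: after programming a
 * wider bus, the init code can confirm it with mmc_bus_test() and fall back
 * on failure. mmc_set_bus_width() comes from core.h; the fallback policy is
 * an assumption for illustration only.
 */
static int __maybe_unused example_verify_4bit_bus(struct mmc_card *card)
{
        int err;

        mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);

        err = mmc_bus_test(card, MMC_BUS_WIDTH_4);
        if (err)
                mmc_set_bus_width(card->host, MMC_BUS_WIDTH_1);

        return err;
}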