/* mmc_ops.c */
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"
  19. static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
  20. {
  21. int err;
  22. struct mmc_command cmd = {0};
  23. BUG_ON(!host);
  24. cmd.opcode = MMC_SELECT_CARD;
  25. if (card) {
  26. cmd.arg = card->rca << 16;
  27. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  28. } else {
  29. cmd.arg = 0;
  30. cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
  31. }
  32. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  33. if (err)
  34. return err;
  35. return 0;
  36. }
  37. int mmc_select_card(struct mmc_card *card)
  38. {
  39. BUG_ON(!card);
  40. return _mmc_select_card(card->host, card);
  41. }
  42. int mmc_deselect_cards(struct mmc_host *host)
  43. {
  44. return _mmc_select_card(host, NULL);
  45. }
  46. int mmc_card_sleepawake(struct mmc_host *host, int sleep)
  47. {
  48. struct mmc_command cmd = {0};
  49. struct mmc_card *card = host->card;
  50. int err;
  51. if (sleep)
  52. mmc_deselect_cards(host);
  53. cmd.opcode = MMC_SLEEP_AWAKE;
  54. cmd.arg = card->rca << 16;
  55. if (sleep)
  56. cmd.arg |= 1 << 15;
  57. cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
  58. err = mmc_wait_for_cmd(host, &cmd, 0);
  59. if (err)
  60. return err;
  61. /*
  62. * If the host does not wait while the card signals busy, then we will
  63. * will have to wait the sleep/awake timeout. Note, we cannot use the
  64. * SEND_STATUS command to poll the status because that command (and most
  65. * others) is invalid while the card sleeps.
  66. */
  67. if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
  68. mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));
  69. if (!sleep)
  70. err = mmc_select_card(card);
  71. return err;
  72. }
  73. int mmc_go_idle(struct mmc_host *host)
  74. {
  75. int err;
  76. struct mmc_command cmd = {0};
  77. /*
  78. * Non-SPI hosts need to prevent chipselect going active during
  79. * GO_IDLE; that would put chips into SPI mode. Remind them of
  80. * that in case of hardware that won't pull up DAT3/nCS otherwise.
  81. *
  82. * SPI hosts ignore ios.chip_select; it's managed according to
  83. * rules that must accommodate non-MMC slaves which this layer
  84. * won't even know about.
  85. */
  86. if (!mmc_host_is_spi(host)) {
  87. mmc_set_chip_select(host, MMC_CS_HIGH);
  88. mmc_delay(1);
  89. }
  90. cmd.opcode = MMC_GO_IDLE_STATE;
  91. cmd.arg = 0;
  92. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
  93. err = mmc_wait_for_cmd(host, &cmd, 0);
  94. mmc_delay(1);
  95. if (!mmc_host_is_spi(host)) {
  96. mmc_set_chip_select(host, MMC_CS_DONTCARE);
  97. mmc_delay(1);
  98. }
  99. host->use_spi_crc = 0;
  100. return err;
  101. }
  102. int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
  103. {
  104. struct mmc_command cmd = {0};
  105. int i, err = 0;
  106. BUG_ON(!host);
  107. cmd.opcode = MMC_SEND_OP_COND;
  108. cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
  109. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
  110. for (i = 100; i; i--) {
  111. err = mmc_wait_for_cmd(host, &cmd, 0);
  112. if (err)
  113. break;
  114. /* if we're just probing, do a single pass */
  115. if (ocr == 0)
  116. break;
  117. /* otherwise wait until reset completes */
  118. if (mmc_host_is_spi(host)) {
  119. if (!(cmd.resp[0] & R1_SPI_IDLE))
  120. break;
  121. } else {
  122. if (cmd.resp[0] & MMC_CARD_BUSY)
  123. break;
  124. }
  125. err = -ETIMEDOUT;
  126. mmc_delay(10);
  127. }
  128. if (rocr && !mmc_host_is_spi(host))
  129. *rocr = cmd.resp[0];
  130. return err;
  131. }
  132. int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
  133. {
  134. int err;
  135. struct mmc_command cmd = {0};
  136. BUG_ON(!host);
  137. BUG_ON(!cid);
  138. cmd.opcode = MMC_ALL_SEND_CID;
  139. cmd.arg = 0;
  140. cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
  141. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  142. if (err)
  143. return err;
  144. memcpy(cid, cmd.resp, sizeof(u32) * 4);
  145. return 0;
  146. }
  147. int mmc_set_relative_addr(struct mmc_card *card)
  148. {
  149. int err;
  150. struct mmc_command cmd = {0};
  151. BUG_ON(!card);
  152. BUG_ON(!card->host);
  153. cmd.opcode = MMC_SET_RELATIVE_ADDR;
  154. cmd.arg = card->rca << 16;
  155. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  156. err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
  157. if (err)
  158. return err;
  159. return 0;
  160. }
  161. static int
  162. mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
  163. {
  164. int err;
  165. struct mmc_command cmd = {0};
  166. BUG_ON(!host);
  167. BUG_ON(!cxd);
  168. cmd.opcode = opcode;
  169. cmd.arg = arg;
  170. cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
  171. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  172. if (err)
  173. return err;
  174. memcpy(cxd, cmd.resp, sizeof(u32) * 4);
  175. return 0;
  176. }
  177. static int
  178. mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
  179. u32 opcode, void *buf, unsigned len)
  180. {
  181. struct mmc_request mrq = {0};
  182. struct mmc_command cmd = {0};
  183. struct mmc_data data = {0};
  184. struct scatterlist sg;
  185. void *data_buf;
  186. /* dma onto stack is unsafe/nonportable, but callers to this
  187. * routine normally provide temporary on-stack buffers ...
  188. */
  189. data_buf = kmalloc(len, GFP_KERNEL);
  190. if (data_buf == NULL)
  191. return -ENOMEM;
  192. mrq.cmd = &cmd;
  193. mrq.data = &data;
  194. cmd.opcode = opcode;
  195. cmd.arg = 0;
  196. /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
  197. * rely on callers to never use this with "native" calls for reading
  198. * CSD or CID. Native versions of those commands use the R2 type,
  199. * not R1 plus a data block.
  200. */
  201. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
  202. data.blksz = len;
  203. data.blocks = 1;
  204. data.flags = MMC_DATA_READ;
  205. data.sg = &sg;
  206. data.sg_len = 1;
  207. sg_init_one(&sg, data_buf, len);
  208. if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
  209. /*
  210. * The spec states that CSR and CID accesses have a timeout
  211. * of 64 clock cycles.
  212. */
  213. data.timeout_ns = 0;
  214. data.timeout_clks = 64;
  215. } else
  216. mmc_set_data_timeout(&data, card);
  217. mmc_wait_for_req(host, &mrq);
  218. memcpy(buf, data_buf, len);
  219. kfree(data_buf);
  220. if (cmd.error)
  221. return cmd.error;
  222. if (data.error)
  223. return data.error;
  224. return 0;
  225. }
  226. int mmc_send_csd(struct mmc_card *card, u32 *csd)
  227. {
  228. int ret, i;
  229. if (!mmc_host_is_spi(card->host))
  230. return mmc_send_cxd_native(card->host, card->rca << 16,
  231. csd, MMC_SEND_CSD);
  232. ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
  233. if (ret)
  234. return ret;
  235. for (i = 0;i < 4;i++)
  236. csd[i] = be32_to_cpu(csd[i]);
  237. return 0;
  238. }
  239. int mmc_send_cid(struct mmc_host *host, u32 *cid)
  240. {
  241. int ret, i;
  242. if (!mmc_host_is_spi(host)) {
  243. if (!host->card)
  244. return -EINVAL;
  245. return mmc_send_cxd_native(host, host->card->rca << 16,
  246. cid, MMC_SEND_CID);
  247. }
  248. ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
  249. if (ret)
  250. return ret;
  251. for (i = 0;i < 4;i++)
  252. cid[i] = be32_to_cpu(cid[i]);
  253. return 0;
  254. }
  255. int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
  256. {
  257. return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
  258. ext_csd, 512);
  259. }
  260. int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
  261. {
  262. struct mmc_command cmd = {0};
  263. int err;
  264. cmd.opcode = MMC_SPI_READ_OCR;
  265. cmd.arg = highcap ? (1 << 30) : 0;
  266. cmd.flags = MMC_RSP_SPI_R3;
  267. err = mmc_wait_for_cmd(host, &cmd, 0);
  268. *ocrp = cmd.resp[1];
  269. return err;
  270. }
  271. int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
  272. {
  273. struct mmc_command cmd = {0};
  274. int err;
  275. cmd.opcode = MMC_SPI_CRC_ON_OFF;
  276. cmd.flags = MMC_RSP_SPI_R1;
  277. cmd.arg = use_crc;
  278. err = mmc_wait_for_cmd(host, &cmd, 0);
  279. if (!err)
  280. host->use_spi_crc = use_crc;
  281. return err;
  282. }
  283. /**
  284. * mmc_switch - modify EXT_CSD register
  285. * @card: the MMC card associated with the data transfer
  286. * @set: cmd set values
  287. * @index: EXT_CSD register index
  288. * @value: value to program into EXT_CSD register
  289. * @timeout_ms: timeout (ms) for operation performed by register write,
  290. * timeout of zero implies maximum possible timeout
  291. *
  292. * Modifies the EXT_CSD register for selected card.
  293. */
  294. int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
  295. unsigned int timeout_ms)
  296. {
  297. int err;
  298. struct mmc_command cmd = {0};
  299. u32 status;
  300. BUG_ON(!card);
  301. BUG_ON(!card->host);
  302. cmd.opcode = MMC_SWITCH;
  303. cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
  304. (index << 16) |
  305. (value << 8) |
  306. set;
  307. cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
  308. cmd.cmd_timeout_ms = timeout_ms;
  309. err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
  310. if (err)
  311. return err;
  312. /* Must check status to be sure of no errors */
  313. do {
  314. err = mmc_send_status(card, &status);
  315. if (err)
  316. return err;
  317. if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
  318. break;
  319. if (mmc_host_is_spi(card->host))
  320. break;
  321. } while (R1_CURRENT_STATE(status) == 7);
  322. if (mmc_host_is_spi(card->host)) {
  323. if (status & R1_SPI_ILLEGAL_COMMAND)
  324. return -EBADMSG;
  325. } else {
  326. if (status & 0xFDFFA000)
  327. printk(KERN_WARNING "%s: unexpected status %#x after "
  328. "switch", mmc_hostname(card->host), status);
  329. if (status & R1_SWITCH_ERROR)
  330. return -EBADMSG;
  331. }
  332. return 0;
  333. }
  334. EXPORT_SYMBOL_GPL(mmc_switch);
  335. int mmc_send_status(struct mmc_card *card, u32 *status)
  336. {
  337. int err;
  338. struct mmc_command cmd = {0};
  339. BUG_ON(!card);
  340. BUG_ON(!card->host);
  341. cmd.opcode = MMC_SEND_STATUS;
  342. if (!mmc_host_is_spi(card->host))
  343. cmd.arg = card->rca << 16;
  344. cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  345. err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
  346. if (err)
  347. return err;
  348. /* NOTE: callers are required to understand the difference
  349. * between "native" and SPI format status words!
  350. */
  351. if (status)
  352. *status = cmd.resp[0];
  353. return 0;
  354. }
  355. static int
  356. mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
  357. u8 len)
  358. {
  359. struct mmc_request mrq = {0};
  360. struct mmc_command cmd = {0};
  361. struct mmc_data data = {0};
  362. struct scatterlist sg;
  363. u8 *data_buf;
  364. u8 *test_buf;
  365. int i, err;
  366. static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
  367. static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
  368. /* dma onto stack is unsafe/nonportable, but callers to this
  369. * routine normally provide temporary on-stack buffers ...
  370. */
  371. data_buf = kmalloc(len, GFP_KERNEL);
  372. if (!data_buf)
  373. return -ENOMEM;
  374. if (len == 8)
  375. test_buf = testdata_8bit;
  376. else if (len == 4)
  377. test_buf = testdata_4bit;
  378. else {
  379. printk(KERN_ERR "%s: Invalid bus_width %d\n",
  380. mmc_hostname(host), len);
  381. kfree(data_buf);
  382. return -EINVAL;
  383. }
  384. if (opcode == MMC_BUS_TEST_W)
  385. memcpy(data_buf, test_buf, len);
  386. mrq.cmd = &cmd;
  387. mrq.data = &data;
  388. cmd.opcode = opcode;
  389. cmd.arg = 0;
  390. /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
  391. * rely on callers to never use this with "native" calls for reading
  392. * CSD or CID. Native versions of those commands use the R2 type,
  393. * not R1 plus a data block.
  394. */
  395. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
  396. data.blksz = len;
  397. data.blocks = 1;
  398. if (opcode == MMC_BUS_TEST_R)
  399. data.flags = MMC_DATA_READ;
  400. else
  401. data.flags = MMC_DATA_WRITE;
  402. data.sg = &sg;
  403. data.sg_len = 1;
  404. sg_init_one(&sg, data_buf, len);
  405. mmc_wait_for_req(host, &mrq);
  406. err = 0;
  407. if (opcode == MMC_BUS_TEST_R) {
  408. for (i = 0; i < len / 4; i++)
  409. if ((test_buf[i] ^ data_buf[i]) != 0xff) {
  410. err = -EIO;
  411. break;
  412. }
  413. }
  414. kfree(data_buf);
  415. if (cmd.error)
  416. return cmd.error;
  417. if (data.error)
  418. return data.error;
  419. return err;
  420. }
  421. int mmc_bus_test(struct mmc_card *card, u8 bus_width)
  422. {
  423. int err, width;
  424. if (bus_width == MMC_BUS_WIDTH_8)
  425. width = 8;
  426. else if (bus_width == MMC_BUS_WIDTH_4)
  427. width = 4;
  428. else if (bus_width == MMC_BUS_WIDTH_1)
  429. return 0; /* no need for test */
  430. else
  431. return -EINVAL;
  432. /*
  433. * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
  434. * is a problem. This improves chances that the test will work.
  435. */
  436. mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
  437. err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
  438. return err;
  439. }