block.c
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"
MODULE_ALIAS("mmc:block");

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
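
/*
 * Reliable writes are supported only on MMC cards that either advertise
 * the write-reliability parameter in EXT_CSD or report a non-zero
 * reliable-write sector count.
 */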
#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&		\
        (((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||	\
         ((card)->ext_csd.rel_sectors)))
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t       lock;
        struct gendisk   *disk;
        struct mmc_queue queue;

        unsigned int     usage;
        unsigned int     read_only;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devmaj = MAJOR(disk_devt(md->disk));
                int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;

                if (!devmaj)
                        devidx = md->disk->first_minor / perdev_minors;

                blk_cleanup_queue(md->queue.queue);
                __clear_bit(devidx, dev_use);
                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
        return 0;
}
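
/*
 * MMC/SD media have no physical CHS geometry, so report a synthetic
 * 4 heads x 16 sectors layout derived from the capacity; partitioning
 * tools expect getgeo to return something sensible.
 */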
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}

static const struct block_device_operations mmc_bdops = {
        .open    = mmc_blk_open,
        .release = mmc_blk_release,
        .getgeo  = mmc_blk_getgeo,
        .owner   = THIS_MODULE,
};
struct mmc_blk_request {
        struct mmc_request mrq;
        struct mmc_command cmd;
        struct mmc_command stop;
        struct mmc_data    data;
};
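
/*
 * Ask an SD card how many blocks it wrote successfully (ACMD22,
 * SEND_NUM_WR_BLKS), so that after a write error we can complete the
 * sectors that did reach the medium. Returns (u32)-1 on failure.
 */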
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq;
        struct mmc_command cmd;
        struct mmc_data data;
        unsigned int timeout_us;

        struct scatterlist sg;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return (u32)-1;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return (u32)-1;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        memset(&data, 0, sizeof(struct mmc_data));

        data.timeout_ns = card->csd.tacc_ns * 100;
        data.timeout_clks = card->csd.tacc_clks * 100;

        timeout_us = data.timeout_ns / 1000;
        timeout_us += data.timeout_clks * 1000 /
                (card->host->ios.clock / 1000);

        if (timeout_us > 100000) {
                data.timeout_ns = 100000000;
                data.timeout_clks = 0;
        }

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        memset(&mrq, 0, sizeof(struct mmc_request));

        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return (u32)-1;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                result = (u32)-1;

        return result;
}
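
/*
 * Read the card's status register with CMD13 (SEND_STATUS); used to
 * report the card state after a failed transfer.
 */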
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
        struct mmc_command cmd;
        int err;

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                printk(KERN_ERR "%s: error %d sending status command\n",
                       req->rq_disk->disk_name, err);
        return cmd.resp[0];
}
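
/*
 * Service a DISCARD request by erasing the given sector range, using
 * TRIM where the card supports it and a full erase otherwise.
 */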
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        if (!mmc_can_erase(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;

        err = mmc_erase(card, from, nr, arg);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err ? 0 : 1;
}
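
/*
 * Secure discard: if the card can trim and the range is not aligned to
 * an erase group, use the two-phase SECURE_TRIM1/SECURE_TRIM2 sequence;
 * otherwise fall back to a secure erase of the whole range.
 */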
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        if (!mmc_can_secure_erase_trim(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;

        err = mmc_erase(card, from, nr, arg);
        if (!err && arg == MMC_SECURE_TRIM1_ARG)
                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err ? 0 : 1;
}
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;

        /*
         * No-op, only service this because we need REQ_FUA for reliable
         * writes.
         */
        spin_lock_irq(&md->lock);
        __blk_end_request_all(req, 0);
        spin_unlock_irq(&md->lock);

        return 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                   struct mmc_card *card,
                                   struct request *req)
{
        int err;
        struct mmc_command set_count;

        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }

        memset(&set_count, 0, sizeof(struct mmc_command));
        set_count.opcode = MMC_SET_BLOCK_COUNT;
        set_count.arg = brq->data.blocks | (1 << 31);
        set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &set_count, 0);
        if (err)
                printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
                       req->rq_disk->disk_name, err);
        return err;
}
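
/*
 * Main read/write path: build an mmc_blk_request for (part of) the
 * block layer request, issue it, then complete however many sectors
 * were transferred, looping until the request is done. On a read error
 * the transfer is retried one sector at a time to pinpoint the bad
 * sector.
 */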
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        struct mmc_blk_request brq;
        int ret = 1, disable_multi = 0;

        /*
         * Reliable writes are used to implement Forced Unit Access and
         * REQ_META accesses, and are supported only on MMCs.
         */
        bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
                          (req->cmd_flags & REQ_META)) &&
                (rq_data_dir(req) == WRITE) &&
                REL_WRITES_SUPPORTED(card);

        do {
                struct mmc_command cmd;
                u32 readcmd, writecmd, status = 0;

                memset(&brq, 0, sizeof(struct mmc_blk_request));
                brq.mrq.cmd = &brq.cmd;
                brq.mrq.data = &brq.data;

                brq.cmd.arg = blk_rq_pos(req);
                if (!mmc_card_blockaddr(card))
                        brq.cmd.arg <<= 9;
                brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
                brq.data.blksz = 512;
                brq.stop.opcode = MMC_STOP_TRANSMISSION;
                brq.stop.arg = 0;
                brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                brq.data.blocks = blk_rq_sectors(req);

                /*
                 * The block layer doesn't support all sector count
                 * restrictions, so we need to be prepared for too big
                 * requests.
                 */
                if (brq.data.blocks > card->host->max_blk_count)
                        brq.data.blocks = card->host->max_blk_count;

                /*
                 * After a read error, we redo the request one sector at a time
                 * in order to accurately determine which sectors can be read
                 * successfully.
                 */
                if (disable_multi && brq.data.blocks > 1)
                        brq.data.blocks = 1;

                if (brq.data.blocks > 1 || do_rel_wr) {
                        /* SPI multiblock writes terminate using a special
                         * token, not a STOP_TRANSMISSION request. Reliable
                         * writes use SET_BLOCK_COUNT and do not use a
                         * STOP_TRANSMISSION request either.
                         */
                        if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
                            rq_data_dir(req) == READ)
                                brq.mrq.stop = &brq.stop;
                        readcmd = MMC_READ_MULTIPLE_BLOCK;
                        writecmd = MMC_WRITE_MULTIPLE_BLOCK;
                } else {
                        brq.mrq.stop = NULL;
                        readcmd = MMC_READ_SINGLE_BLOCK;
                        writecmd = MMC_WRITE_BLOCK;
                }
                if (rq_data_dir(req) == READ) {
                        brq.cmd.opcode = readcmd;
                        brq.data.flags |= MMC_DATA_READ;
                } else {
                        brq.cmd.opcode = writecmd;
                        brq.data.flags |= MMC_DATA_WRITE;
                }

                if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
                        goto cmd_err;

                mmc_set_data_timeout(&brq.data, card);

                brq.data.sg = mq->sg;
                brq.data.sg_len = mmc_queue_map_sg(mq);

                /*
                 * Adjust the sg list so it is the same size as the
                 * request.
                 */
                if (brq.data.blocks != blk_rq_sectors(req)) {
                        int i, data_size = brq.data.blocks << 9;
                        struct scatterlist *sg;

                        for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
                                data_size -= sg->length;
                                if (data_size <= 0) {
                                        sg->length += data_size;
                                        i++;
                                        break;
                                }
                        }
                        brq.data.sg_len = i;
                }

                mmc_queue_bounce_pre(mq);

                mmc_wait_for_req(card->host, &brq.mrq);

                mmc_queue_bounce_post(mq);

                /*
                 * Check for errors here, but don't jump to cmd_err
                 * until later as we need to wait for the card to leave
                 * programming mode even when things go wrong.
                 */
                if (brq.cmd.error || brq.data.error || brq.stop.error) {
                        if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
                                /* Redo read one sector at a time */
                                printk(KERN_WARNING "%s: retrying using single "
                                       "block read\n", req->rq_disk->disk_name);
                                disable_multi = 1;
                                continue;
                        }
                        status = get_card_status(card, req);
                }

                if (brq.cmd.error) {
                        printk(KERN_ERR "%s: error %d sending read/write "
                               "command, response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.cmd.error,
                               brq.cmd.resp[0], status);
                }

                if (brq.data.error) {
                        if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
                                /* 'Stop' response contains card status */
                                status = brq.mrq.stop->resp[0];
                        printk(KERN_ERR "%s: error %d transferring data,"
                               " sector %u, nr %u, card status %#x\n",
                               req->rq_disk->disk_name, brq.data.error,
                               (unsigned)blk_rq_pos(req),
                               (unsigned)blk_rq_sectors(req), status);
                }

                if (brq.stop.error) {
                        printk(KERN_ERR "%s: error %d sending stop command, "
                               "response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.stop.error,
                               brq.stop.resp[0], status);
                }

                if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
                        do {
                                int err;

                                cmd.opcode = MMC_SEND_STATUS;
                                cmd.arg = card->rca << 16;
                                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                                err = mmc_wait_for_cmd(card->host, &cmd, 5);
                                if (err) {
                                        printk(KERN_ERR "%s: error %d requesting status\n",
                                               req->rq_disk->disk_name, err);
                                        goto cmd_err;
                                }
                                /*
                                 * Some cards mishandle the status bits,
                                 * so make sure to check both the busy
                                 * indication and the card state.
                                 */
                        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                                 (R1_CURRENT_STATE(cmd.resp[0]) == 7));

#if 0
                        if (cmd.resp[0] & ~0x00000900)
                                printk(KERN_ERR "%s: status = %08x\n",
                                       req->rq_disk->disk_name, cmd.resp[0]);
                        if (mmc_decode_status(cmd.resp))
                                goto cmd_err;
#endif
                }

                if (brq.cmd.error || brq.stop.error || brq.data.error) {
                        if (rq_data_dir(req) == READ) {
                                /*
                                 * After an error, we redo I/O one sector at a
                                 * time, so we only reach here after trying to
                                 * read a single sector.
                                 */
                                spin_lock_irq(&md->lock);
                                ret = __blk_end_request(req, -EIO, brq.data.blksz);
                                spin_unlock_irq(&md->lock);
                                continue;
                        }
                        goto cmd_err;
                }

                /*
                 * A block was successfully transferred.
                 */
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        } while (ret);

        return 1;

 cmd_err:
        /*
         * If this is an SD card and we're writing, we can first
         * mark the known good sectors as ok.
         *
         * If the card is not SD, we can still ok written sectors
         * as reported by the controller (which might be less than
         * the real number of written sectors, but never more).
         */
        if (mmc_card_sd(card)) {
                u32 blocks;

                blocks = mmc_sd_num_wr_blocks(card);
                if (blocks != (u32)-1) {
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, 0, blocks << 9);
                        spin_unlock_irq(&md->lock);
                }
        } else {
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        }

        spin_lock_irq(&md->lock);
        while (ret)
                ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
        spin_unlock_irq(&md->lock);

        return 0;
}
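
/*
 * Dispatch a block layer request to the appropriate handler:
 * discard, secure discard, flush, or ordinary read/write.
 */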
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
        int ret;
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;

        mmc_claim_host(card->host);

        if (req->cmd_flags & REQ_DISCARD) {
                if (req->cmd_flags & REQ_SECURE)
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
        } else if (req->cmd_flags & REQ_FLUSH) {
                ret = mmc_blk_issue_flush(mq, req);
        } else {
                ret = mmc_blk_issue_rw_rq(mq, req);
        }

        mmc_release_host(card->host);
        return ret;
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
        return mmc_card_readonly(card) ||
               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
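
/*
 * Allocate per-card block device state: pick a free device index, set
 * up the gendisk and request queue, and size the disk from the card's
 * CSD/EXT_CSD capacity.
 */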
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
        struct mmc_blk_data *md;
        int devidx, ret;

        devidx = find_first_zero_bit(dev_use, max_devices);
        if (devidx >= max_devices)
                return ERR_PTR(-ENOSPC);
        __set_bit(devidx, dev_use);

        md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
        if (!md) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Set the read-only status based on the supported commands
         * and the write protect switch.
         */
        md->read_only = mmc_blk_readonly(card);

        md->disk = alloc_disk(perdev_minors);
        if (md->disk == NULL) {
                ret = -ENOMEM;
                goto err_kfree;
        }

        spin_lock_init(&md->lock);
        md->usage = 1;

        ret = mmc_init_queue(&md->queue, card, &md->lock);
        if (ret)
                goto err_putdisk;

        md->queue.issue_fn = mmc_blk_issue_rq;
        md->queue.data = md;

        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->first_minor = devidx * perdev_minors;
        md->disk->fops = &mmc_bdops;
        md->disk->private_data = md;
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = &card->dev;
        set_disk_ro(md->disk, md->read_only);
        if (REL_WRITES_SUPPORTED(card))
                blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);

        /*
         * As discussed on lkml, GENHD_FL_REMOVABLE should:
         *
         * - be set for removable media with permanent block devices
         * - be unset for removable block devices with permanent media
         *
         * Since MMC block devices clearly fall under the second
         * case, we do not set GENHD_FL_REMOVABLE.  Userspace
         * should use the block device creation/destruction hotplug
         * messages to tell when the card is present.
         */

        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                 "mmcblk%d", devidx);

        blk_queue_logical_block_size(md->queue.queue, 512);

        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                /*
                 * The EXT_CSD sector count is in number of 512 byte
                 * sectors.
                 */
                set_capacity(md->disk, card->ext_csd.sectors);
        } else {
                /*
                 * The CSD capacity field is in units of read_blkbits.
                 * set_capacity takes units of 512 bytes.
                 */
                set_capacity(md->disk,
                        card->csd.capacity << (card->csd.read_blkbits - 9));
        }
        return md;

 err_putdisk:
        put_disk(md->disk);
 err_kfree:
        kfree(md);
 out:
        return ERR_PTR(ret);
}
static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
        int err;

        mmc_claim_host(card->host);
        err = mmc_set_blocklen(card, 512);
        mmc_release_host(card->host);

        if (err) {
                printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
                       md->disk->disk_name, err);
                return -EINVAL;
        }

        return 0;
}
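
/*
 * Bind to a newly inserted card: verify it supports block reads,
 * allocate the block device, set the 512 byte block length and
 * publish the disk.
 */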
static int mmc_blk_probe(struct mmc_card *card)
{
        struct mmc_blk_data *md;
        int err;
        char cap_str[10];

        /*
         * Check that the card supports the command class(es) we need.
         */
        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
                return -ENODEV;

        md = mmc_blk_alloc(card);
        if (IS_ERR(md))
                return PTR_ERR(md);

        err = mmc_blk_set_blksize(md, card);
        if (err)
                goto out;

        string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s %s %s\n",
               md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
               cap_str, md->read_only ? "(ro)" : "");

        mmc_set_drvdata(card, md);
        add_disk(md->disk);
        return 0;

 out:
        mmc_cleanup_queue(&md->queue);
        mmc_blk_put(md);

        return err;
}
static void mmc_blk_remove(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                /* Stop new requests from getting into the queue */
                del_gendisk(md->disk);

                /* Then flush out any already in there */
                mmc_cleanup_queue(&md->queue);

                mmc_blk_put(md);
        }
        mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_queue_suspend(&md->queue);
        }
        return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_blk_set_blksize(md, card);
                mmc_queue_resume(&md->queue);
        }
        return 0;
}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume  NULL
#endif
static struct mmc_driver mmc_driver = {
        .drv     = {
                .name = "mmcblk",
        },
        .probe   = mmc_blk_probe,
        .remove  = mmc_blk_remove,
        .suspend = mmc_blk_suspend,
        .resume  = mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
        int res;

        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
                pr_info("mmcblk: using %d minors per device\n", perdev_minors);

        max_devices = 256 / perdev_minors;

        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
        if (res)
                goto out;

        res = mmc_register_driver(&mmc_driver);
        if (res)
                goto out2;

        return 0;
 out2:
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
        return res;
}

static void __exit mmc_blk_exit(void)
{
        mmc_unregister_driver(&mmc_driver);
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");