@@ -145,7 +145,7 @@ struct mmc_blk_request {
 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 {
 	int err;
-	u32 blocks;
+	__be32 blocks;
 
 	struct mmc_request mrq;
 	struct mmc_command cmd;
@@ -204,9 +204,24 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	if (cmd.error || data.error)
 		return (u32)-1;
 
-	blocks = ntohl(blocks);
+	return ntohl(blocks);
+}
+
+static u32 get_card_status(struct mmc_card *card, struct request *req)
+{
+	struct mmc_command cmd;
+	int err;
 
-	return blocks;
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	cmd.opcode = MMC_SEND_STATUS;
+	if (!mmc_host_is_spi(card->host))
+		cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if (err)
+		printk(KERN_ERR "%s: error %d sending status command\n",
+		       req->rq_disk->disk_name, err);
+	return cmd.resp[0];
 }
 
 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
@@ -214,13 +229,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1;
+	int ret = 1, disable_multi = 0;
 
 	mmc_claim_host(card->host);
 
 	do {
 		struct mmc_command cmd;
-		u32 readcmd, writecmd;
+		u32 readcmd, writecmd, status = 0;
 
 		memset(&brq, 0, sizeof(struct mmc_blk_request));
 		brq.mrq.cmd = &brq.cmd;
@@ -236,6 +251,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 		brq.data.blocks = req->nr_sectors;
 
+		/*
+		 * After a read error, we redo the request one sector at a time
+		 * in order to accurately determine which sectors can be read
+		 * successfully.
+		 */
+		if (disable_multi && brq.data.blocks > 1)
+			brq.data.blocks = 1;
+
 		if (brq.data.blocks > 1) {
 			/* SPI multiblock writes terminate using a special
 			 * token, not a STOP_TRANSMISSION request.
@@ -264,6 +287,25 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.data.sg = mq->sg;
 		brq.data.sg_len = mmc_queue_map_sg(mq);
 
+		/*
+		 * Adjust the sg list so it is the same size as the
+		 * request.
+		 */
+		if (brq.data.blocks != req->nr_sectors) {
+			int i, data_size = brq.data.blocks << 9;
+			struct scatterlist *sg;
+
+			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
+				data_size -= sg->length;
+				if (data_size <= 0) {
+					sg->length += data_size;
+					i++;
+					break;
+				}
+			}
+			brq.data.sg_len = i;
+		}
+
 		mmc_queue_bounce_pre(mq);
 
 		mmc_wait_for_req(card->host, &brq.mrq);
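The sg-list adjustment above can be read in isolation: walk the scatterlist, subtract each segment's length from the number of bytes the (possibly shortened) transfer will move, and clip the segment that crosses the boundary. The following is a minimal userspace sketch of that loop; "struct seg" and trim_segs() are hypothetical stand-ins for struct scatterlist and the for_each_sg() walk, not kernel API.

	/*
	 * Sketch of the scatterlist trimming logic (hypothetical names:
	 * "struct seg" stands in for struct scatterlist, trim_segs()
	 * for the for_each_sg() loop).
	 */
	#include <stdio.h>

	struct seg {
		unsigned int length;	/* bytes covered by this segment */
	};

	static int trim_segs(struct seg *segs, int nsegs, int data_size)
	{
		int i;

		for (i = 0; i < nsegs; i++) {
			data_size -= segs[i].length;
			if (data_size <= 0) {
				/* data_size is zero or negative: clip here */
				segs[i].length += data_size;
				return i + 1;	/* shortened segment count */
			}
		}
		return nsegs;
	}

	int main(void)
	{
		struct seg segs[] = { { 4096 }, { 4096 }, { 4096 } };
		/* Trim to one 512-byte sector, as in the single-block retry */
		int n = trim_segs(segs, 3, 1 << 9);

		printf("segments: %d, first length: %u\n", n, segs[0].length);
		return 0;
	}

Because data_size is zero or negative when the crossing segment is found, adding it to that segment's length clips the list exactly at the transfer boundary without rebuilding it.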
@@ -275,19 +317,40 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		 * until later as we need to wait for the card to leave
 		 * programming mode even when things go wrong.
 		 */
+		if (brq.cmd.error || brq.data.error || brq.stop.error) {
+			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
+				/* Redo read one sector at a time */
+				printk(KERN_WARNING "%s: retrying using single "
+				       "block read\n", req->rq_disk->disk_name);
+				disable_multi = 1;
+				continue;
+			}
+			status = get_card_status(card, req);
+		}
+
 		if (brq.cmd.error) {
-			printk(KERN_ERR "%s: error %d sending read/write command\n",
-			       req->rq_disk->disk_name, brq.cmd.error);
+			printk(KERN_ERR "%s: error %d sending read/write "
+			       "command, response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, brq.cmd.error,
+			       brq.cmd.resp[0], status);
 		}
 
 		if (brq.data.error) {
-			printk(KERN_ERR "%s: error %d transferring data\n",
-			       req->rq_disk->disk_name, brq.data.error);
+			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
+				/* 'Stop' response contains card status */
+				status = brq.mrq.stop->resp[0];
+			printk(KERN_ERR "%s: error %d transferring data,"
+			       " sector %u, nr %u, card status %#x\n",
+			       req->rq_disk->disk_name, brq.data.error,
+			       (unsigned)req->sector,
+			       (unsigned)req->nr_sectors, status);
 		}
 
 		if (brq.stop.error) {
-			printk(KERN_ERR "%s: error %d sending stop command\n",
-			       req->rq_disk->disk_name, brq.stop.error);
+			printk(KERN_ERR "%s: error %d sending stop command, "
+			       "response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, brq.stop.error,
+			       brq.stop.resp[0], status);
 		}
 
 		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
@@ -320,8 +383,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 #endif
 		}
 
-		if (brq.cmd.error || brq.data.error || brq.stop.error)
+		if (brq.cmd.error || brq.stop.error || brq.data.error) {
+			if (rq_data_dir(req) == READ) {
+				/*
+				 * After an error, we redo I/O one sector at a
+				 * time, so we only reach here after trying to
+				 * read a single sector.
+				 */
+				spin_lock_irq(&md->lock);
+				ret = __blk_end_request(req, -EIO, brq.data.blksz);
+				spin_unlock_irq(&md->lock);
+				continue;
+			}
 			goto cmd_err;
+		}
 
 		/*
 		 * A block was successfully transferred.
@@ -343,25 +418,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	 * If the card is not SD, we can still ok written sectors
 	 * as reported by the controller (which might be less than
 	 * the real number of written sectors, but never more).
-	 *
-	 * For reads we just fail the entire chunk as that should
-	 * be safe in all cases.
 	 */
-	if (rq_data_dir(req) != READ) {
-		if (mmc_card_sd(card)) {
-			u32 blocks;
+	if (mmc_card_sd(card)) {
+		u32 blocks;
 
-			blocks = mmc_sd_num_wr_blocks(card);
-			if (blocks != (u32)-1) {
-				spin_lock_irq(&md->lock);
-				ret = __blk_end_request(req, 0, blocks << 9);
-				spin_unlock_irq(&md->lock);
-			}
-		} else {
+		blocks = mmc_sd_num_wr_blocks(card);
+		if (blocks != (u32)-1) {
 			spin_lock_irq(&md->lock);
-			ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+			ret = __blk_end_request(req, 0, blocks << 9);
 			spin_unlock_irq(&md->lock);
 		}
+	} else {
+		spin_lock_irq(&md->lock);
+		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+		spin_unlock_irq(&md->lock);
 	}
 
 	mmc_release_host(card->host);
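Taken together, the hunks implement a simple recovery policy: attempt the multi-sector transfer, and on any error retry one sector at a time so that every readable sector completes and only the genuinely bad sector is failed with -EIO. Below is a minimal sketch of that policy outside the kernel, assuming a simplified dev_read() transport; dev_read(), read_range(), and the simulated bad sector are hypothetical, shown only to make the control flow explicit.

	/*
	 * Sketch of the single-sector retry policy.  dev_read() is a
	 * hypothetical stand-in for the MMC request machinery: it
	 * returns true if the whole range was transferred.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	static const unsigned int bad_sector = 5;	/* simulated media error */

	static bool dev_read(unsigned int sector, unsigned int nr)
	{
		return !(sector <= bad_sector && bad_sector < sector + nr);
	}

	static int read_range(unsigned int sector, unsigned int nr)
	{
		bool disable_multi = false;
		unsigned int done = 0;
		int errors = 0;

		while (done < nr) {
			unsigned int chunk = disable_multi ? 1 : nr - done;

			if (dev_read(sector + done, chunk)) {
				done += chunk;		/* chunk transferred OK */
				continue;
			}
			if (chunk > 1) {
				/* Redo the read one sector at a time */
				disable_multi = true;
				continue;
			}
			/* A single sector failed: fail it alone and carry on */
			fprintf(stderr, "sector %u: I/O error\n", sector + done);
			errors++;
			done++;
		}
		return errors ? -1 : 0;
	}

	int main(void)
	{
		return read_range(0, 8) ? 1 : 0;
	}

As in the patch, disable_multi stays set for the remainder of the request once a multi-sector transfer has failed, so the cost of single-sector reads is paid only on the error path.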