@@ -1030,13 +1030,20 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	if (brq->data.blocks > card->host->max_blk_count)
 		brq->data.blocks = card->host->max_blk_count;
 
-	/*
-	 * After a read error, we redo the request one sector at a time
-	 * in order to accurately determine which sectors can be read
-	 * successfully.
-	 */
-	if (disable_multi && brq->data.blocks > 1)
-		brq->data.blocks = 1;
+	if (brq->data.blocks > 1) {
+		/*
+		 * After a read error, we redo the request one sector
+		 * at a time in order to accurately determine which
+		 * sectors can be read successfully.
+		 */
+		if (disable_multi)
+			brq->data.blocks = 1;
+
+		/* Some controllers can't do multiblock reads due to hw bugs */
+		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
+		    rq_data_dir(req) == READ)
+			brq->data.blocks = 1;
+	}
 
 	if (brq->data.blocks > 1 || do_rel_wr) {
 		/* SPI multiblock writes terminate using a special
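
For context, a minimal sketch of how a host controller driver might opt into this quirk. The driver and function name below are hypothetical; only caps2 and MMC_CAP2_NO_MULTI_READ are taken from the hunk above.

#include <linux/mmc/host.h>

/*
 * Hypothetical host driver hook: a controller whose hardware cannot
 * issue multi-block reads advertises MMC_CAP2_NO_MULTI_READ, and the
 * block layer (per the hunk above) then limits READ requests to a
 * single sector per command.
 */
static void foo_mmc_init_quirks(struct mmc_host *mmc)
{
	mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;
}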