@@ -153,6 +153,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	u64 limit = BLK_BOUNCE_HIGH;
 	int ret;
 	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = *mmc_dev(host)->dma_mask;
@@ -163,7 +164,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		return -ENOMEM;
 
 	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
+	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
 	mq->mqrq_cur = mqrq_cur;
+	mq->mqrq_prev = mqrq_prev;
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -191,9 +194,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 					"allocate bounce cur buffer\n",
 					mmc_card_name(card));
 			}
+			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_prev->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce prev buffer\n",
+					mmc_card_name(card));
+				kfree(mqrq_cur->bounce_buf);
+				mqrq_cur->bounce_buf = NULL;
+			}
 		}
 
-		if (mqrq_cur->bounce_buf) {
+		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_segments(mq->queue, bouncesz / 512);
@@ -208,11 +219,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 			if (ret)
 				goto cleanup_queue;
 
+			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_prev->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
+				goto cleanup_queue;
 		}
 	}
 #endif
 
-	if (!mqrq_cur->bounce_buf) {
+	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
@@ -223,6 +242,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		if (ret)
 			goto cleanup_queue;
 
+
+		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
+			goto cleanup_queue;
 	}
 
 	sema_init(&mq->thread_sem, 1);
@@ -239,6 +262,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 free_bounce_sg:
 	kfree(mqrq_cur->bounce_sg);
 	mqrq_cur->bounce_sg = NULL;
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
 
 cleanup_queue:
 	kfree(mqrq_cur->sg);
@@ -246,6 +271,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	kfree(mqrq_cur->bounce_buf);
 	mqrq_cur->bounce_buf = NULL;
 
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
+
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -255,6 +285,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
 	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
 
 	/* Make sure the queue isn't suspended, as that will deadlock */
 	mmc_queue_resume(mq);
@@ -277,6 +308,15 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	kfree(mqrq_cur->bounce_buf);
 	mqrq_cur->bounce_buf = NULL;
 
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
+
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
+
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);