@@ -43,6 +43,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     void __user *ubuf, unsigned int len)
 {
 	unsigned long uaddr;
+	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;
 
@@ -53,8 +54,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) &&
-	    !(len & queue_dma_alignment(q)))
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	if (!(uaddr & alignment) && !(len & alignment))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -141,15 +142,20 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 
 	/*
 	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned. As the copied buffer is always
-	 * page aligned, we know that there's enough room for padding.
-	 * Extend the last bio and update rq->data_len accordingly.
+	 * or length isn't aligned to dma_pad_mask. As the copied
+	 * buffer is always page aligned, we know that there's enough
+	 * room for padding. Extend the last bio and update
+	 * rq->data_len accordingly.
 	 *
 	 * On unmap, bio_uncopy_user() will use unmodified
 	 * bio_map_data pointed to by bio->bi_private.
 	 */
-	if (len & queue_dma_alignment(q)) {
-		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
+	if (len & q->dma_pad_mask) {
+		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
+		struct bio *bio = rq->biotail;
+
+		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
+		bio->bi_size += pad_len;
 
 		rq->extra_len += pad_len;
 	}
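
A standalone sketch of the padding arithmetic used in the last hunk (not part of the patch; the mask value here is an assumption, a real queue would get it via blk_queue_update_dma_pad()). It shows that (dma_pad_mask & ~len) + 1 is exactly the number of bytes needed to round len up to the next multiple of dma_pad_mask + 1:

#include <stdio.h>

int main(void)
{
	unsigned int dma_pad_mask = 511;	/* assumed 512-byte padding unit */
	unsigned int len = 520;			/* example transfer length */

	if (len & dma_pad_mask) {
		/*
		 * mask & ~len == mask - (len & mask), so pad_len is the
		 * padding unit minus the leftover bytes below the mask.
		 */
		unsigned int pad_len = (dma_pad_mask & ~len) + 1;

		/* prints: len 520 padded by 504 to 1024 */
		printf("len %u padded by %u to %u\n",
		       len, pad_len, len + pad_len);
	}
	return 0;
}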