blk-map.c

/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA. Otherwise, set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, ubuf, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request structure to fill
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @ubuf: the user buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;
	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens, we just lower the requested
		 * mapping len by a page so that we can fit.
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
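
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * synchronous SG_IO-style caller might attach a user buffer to a
 * REQ_TYPE_BLOCK_PC request with blk_rq_map_user() and unmap it again.
 * The function name is hypothetical and the CDB setup is omitted; real
 * callers such as the sg driver run this flow asynchronously.
 */
#if 0
static int example_map_user(struct request_queue *q, struct gendisk *disk,
			    void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* rq->cmd, rq->cmd_len and rq->timeout setup omitted */

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	/* save the original bio: completion may change rq->bio */
	bio = rq->bio;
	blk_execute_rq(q, disk, rq, 0);

	ret = blk_rq_unmap_user(bio);
	blk_put_request(rq);
	return ret;
}
#endif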

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
 * @len: I/O byte count
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path.
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
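
/*
 * Usage sketch (illustrative only): mapping a multi-element iovec copied
 * in from userspace, as an SG_IO handler with iovec_count > 1 might do.
 * example_map_iovec() is hypothetical; note that @len must equal the sum
 * of the iovec lengths or blk_rq_map_user_iov() fails with -EINVAL.
 */
#if 0
static int example_map_iovec(struct request_queue *q, struct request *rq,
			     const struct sg_iovec __user *uiov, int count)
{
	struct sg_iovec iov[UIO_FASTIOV];
	unsigned int len = 0;
	int i;

	if (count <= 0 || count > UIO_FASTIOV)
		return -EINVAL;
	if (copy_from_user(iov, uiov, count * sizeof(*iov)))
		return -EFAULT;
	for (i = 0; i < count; i++)
		len += iov[i].iov_len;

	return blk_rq_map_user_iov(q, rq, NULL, iov, count, len, GFP_KERNEL);
}
#endif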

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
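
/*
 * Usage sketch (illustrative only): an asynchronous caller has to stash
 * the original rq->bio at map time, since rq->bio may point elsewhere by
 * the time the request completes. struct example_io is a hypothetical
 * per-command structure; the unmap must still run in process context.
 */
#if 0
struct example_io {
	struct request *rq;
	struct bio *orig_bio;	/* saved right after blk_rq_map_user() */
};

static int example_io_finish(struct example_io *io)
{
	/* called from process context after request completion */
	int ret = blk_rq_unmap_user(io->orig_bio);

	blk_put_request(io->rq);
	return ret;
}
#endif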

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
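
/*
 * Usage sketch (illustrative only): issuing a REQ_TYPE_BLOCK_PC request
 * backed by a kmalloc()ed buffer. The helper name is hypothetical. A
 * stack buffer would still work, but blk_rq_map_kern() would quietly
 * fall back to a bounce copy (see the object_is_on_stack() check above).
 */
#if 0
static int example_map_kern(struct request_queue *q, struct gendisk *disk,
			    unsigned int len)
{
	struct request *rq;
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq) {
		kfree(buf);
		return -ENOMEM;
	}
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* rq->cmd, rq->cmd_len and rq->timeout setup omitted */

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	kfree(buf);
	return ret;
}
#endif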