blk-map.c

/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
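
/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * blk_rq_map_user() for an SG_IO-style passthrough command.  The helper name
 * and its cmd/ubuf/len arguments are hypothetical; blk_get_request(),
 * blk_execute_rq(), blk_rq_unmap_user() and blk_put_request() are the real
 * block-layer calls of this kernel generation.  Note that rq->bio is saved
 * before execution, because completion may change rq->bio and
 * blk_rq_unmap_user() needs the original head of the bio list.
 */
#if 0
static int example_passthrough(struct request_queue *q, struct gendisk *disk,
			       unsigned char *cmd, unsigned int cmd_len,
			       void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cmd, cmd_len);		/* assumes cmd_len <= BLK_MAX_CDB */
	rq->cmd_len = cmd_len;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	bio = rq->bio;				/* keep the original bio for unmap */
	blk_execute_rq(q, disk, rq, 0);		/* waits; status/sense handling omitted */
	ret = blk_rq_unmap_user(bio);		/* must run in process context */

	blk_put_request(rq);
	return ret;
}
#endif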

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	/* we don't allow misaligned data like bio_map_user() does.  If the
	 * user is using sg, they're expected to know the alignment constraints
	 * and respect them accordingly */
	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
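
/*
 * Illustrative sketch (not part of the original file): mapping a user iovec,
 * roughly as the SG_IO scatter/gather path does.  The helper and its
 * arguments are hypothetical; the iovec is assumed to have been copied in
 * from userspace already, with every element respecting the queue's DMA
 * alignment, since misaligned iovecs are rejected by bio_map_user_iov().
 */
#if 0
static int example_passthrough_iov(struct request_queue *q,
				   struct gendisk *disk, struct request *rq,
				   struct sg_iovec *iov, int iov_count,
				   unsigned int len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user_iov(q, rq, iov, iov_count, len);
	if (ret)
		return ret;

	bio = rq->bio;				/* save for blk_rq_unmap_user() */
	blk_execute_rq(q, disk, rq, 0);		/* status/sense handling omitted */
	return blk_rq_unmap_user(bio);
}
#endif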

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the io completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
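
/*
 * Illustrative sketch (not part of the original file): issuing a command
 * with a kernel buffer, similar in spirit to the scsi_execute() path.  The
 * helper name and arguments are hypothetical; blk_rq_map_kern() builds the
 * bio and bounces it if the buffer is out of the device's DMA reach, so no
 * separate unmap step is needed afterwards.
 */
#if 0
static int example_kern_command(struct request_queue *q, struct gendisk *disk,
				unsigned char *cmd, unsigned int cmd_len,
				void *buffer, unsigned int bufflen)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cmd, cmd_len);		/* assumes cmd_len <= BLK_MAX_CDB */
	rq->cmd_len = cmd_len;

	ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	blk_execute_rq(q, disk, rq, 0);		/* waits for completion */
	ret = rq->errors ? -EIO : 0;		/* crude status mapping for the sketch */
	blk_put_request(rq);
	return ret;
}
#endif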