  1. /*
  2. * Linux host-side vring helpers; for when the kernel needs to access
  3. * someone else's vring.
  4. *
  5. * Copyright IBM Corporation, 2013.
  6. * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  21. *
  22. * Written by: Rusty Russell <rusty@rustcorp.com.au>
  23. */
  24. #ifndef _LINUX_VRINGH_H
  25. #define _LINUX_VRINGH_H
  26. #include <uapi/linux/virtio_ring.h>
  27. #include <linux/uio.h>
  28. #include <linux/slab.h>
  29. #include <asm/barrier.h>
/* virtio_ring with information needed for host access. */
struct vringh {
	/* Guest publishes used event idx (note: we always do). */
	bool event_indices;

	/* Can we get away with weak barriers? */
	bool weak_barriers;

	/* Last available index we saw (ie. where we're up to). */
	u16 last_avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* How many descriptors we've completed since last need_notify(). */
	u32 completed;

	/* The vring (note: it may contain user pointers!) */
	struct vring vring;
};
/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
	/* Inclusive address range this mapping covers. */
	u64 start, end_incl;
	/* NOTE(review): presumably applied to addresses within [start, end_incl]
	 * to translate them for host access — confirm sign convention against
	 * the vringh_getdesc_user() implementation. */
	u64 offset;
};
/**
 * struct vringh_iov - iovec mangler.
 *
 * Mangles iovec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_iov {
	struct iovec *iov;
	size_t consumed; /* Within iov[i] */
	/* max_num may have VRINGH_IOV_ALLOCATED or'd in when vringh allocated
	 * the array itself (see vringh_iov_cleanup()). */
	unsigned i, used, max_num;
};
/**
 * struct vringh_kiov - kvec mangler.
 *
 * Mangles kvec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_kiov {
	struct kvec *iov;
	size_t consumed; /* Within iov[i] */
	/* max_num may have VRINGH_IOV_ALLOCATED or'd in when vringh allocated
	 * the array itself (see vringh_kiov_cleanup()). */
	unsigned i, used, max_num;
};
/* Flag on max_num to indicate we're kmalloced (the cleanup helpers then
 * kfree() the iov array). */
#define VRINGH_IOV_ALLOCATED 0x8000000

/* Helpers for userspace vrings. */
/* NOTE(review): presumably returns 0 on success / negative errno on failure,
 * per kernel convention — confirm in drivers/vhost/vringh.c. */
int vringh_init_user(struct vringh *vrh, u32 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc __user *desc,
		     struct vring_avail __user *avail,
		     struct vring_used __user *used);
  80. static inline void vringh_iov_init(struct vringh_iov *iov,
  81. struct iovec *iovec, unsigned num)
  82. {
  83. iov->used = iov->i = 0;
  84. iov->consumed = 0;
  85. iov->max_num = num;
  86. iov->iov = iovec;
  87. }
  88. static inline void vringh_iov_reset(struct vringh_iov *iov)
  89. {
  90. iov->iov[iov->i].iov_len += iov->consumed;
  91. iov->iov[iov->i].iov_base -= iov->consumed;
  92. iov->consumed = 0;
  93. iov->i = 0;
  94. }
  95. static inline void vringh_iov_cleanup(struct vringh_iov *iov)
  96. {
  97. if (iov->max_num & VRINGH_IOV_ALLOCATED)
  98. kfree(iov->iov);
  99. iov->max_num = iov->used = iov->i = iov->consumed = 0;
  100. iov->iov = NULL;
  101. }
/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head);

/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);

/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len);

/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
/* Mark several descriptors as used in one go. */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used);

/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);

/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);

/* Enable/disable notifications from the other side.
 * NOTE(review): presumably the bool return from enable reports whether more
 * work became available while enabling — confirm in vringh.c. */
bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
/* Helpers for kernelspace vrings. */
/* NOTE(review): presumably returns 0 on success / negative errno on failure,
 * per kernel convention — confirm in drivers/vhost/vringh.c. */
int vringh_init_kern(struct vringh *vrh, u32 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used);
  131. static inline void vringh_kiov_init(struct vringh_kiov *kiov,
  132. struct kvec *kvec, unsigned num)
  133. {
  134. kiov->used = kiov->i = 0;
  135. kiov->consumed = 0;
  136. kiov->max_num = num;
  137. kiov->iov = kvec;
  138. }
  139. static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
  140. {
  141. kiov->iov[kiov->i].iov_len += kiov->consumed;
  142. kiov->iov[kiov->i].iov_base -= kiov->consumed;
  143. kiov->consumed = 0;
  144. kiov->i = 0;
  145. }
  146. static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
  147. {
  148. if (kiov->max_num & VRINGH_IOV_ALLOCATED)
  149. kfree(kiov->iov);
  150. kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
  151. kiov->iov = NULL;
  152. }
/* Convert a descriptor into kvecs (kernel-side analogue of
 * vringh_getdesc_user()).
 * NOTE(review): @gfp presumably controls allocation if the kvec arrays must
 * grow — confirm in drivers/vhost/vringh.c. */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp);
/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len);
/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
/* Mark a descriptor as used. */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);
/* Enable/disable notifications from the other side. */
bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);
/* Do we need to notify the other side? */
int vringh_need_notify_kern(struct vringh *vrh);
  166. #endif /* _LINUX_VRINGH_H */