ring_buffer.c

/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"
/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}
/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}
/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}
/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data that has already been
 * consumed (e.g. a packet descriptor) before copying.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}
/*
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}
/*
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}
/*
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}
/*
 * hv_get_ring_bufferindices()
 *
 * Get the write index of the specified ring buffer as the upper 32 bits
 * of a u64; the lower 32 bits (nominally the read index) are left zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}
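
/*
 * Note: every packet written by hv_ringbuffer_write() below is followed
 * by this u64 value as a trailer recording where the packet started;
 * hv_ringbuffer_read() consumes (and discards) the trailer when it
 * advances the read index.
 */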
/*
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data out of the ring buffer into a destination
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * source (ring) side only!!
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        void *dest,
        u32 destlen,
        u32 start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;
                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else {
                memcpy(dest, ring_buffer + start_read_offset, destlen);
        }

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}
/*
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy data from a source buffer into the ring buffer.
 * Assumes there is enough room. Handles wrap-around of the destination
 * (ring) side only!!
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        u32 start_write_offset,
        void *src,
        u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else {
                memcpy(ring_buffer + start_write_offset, src, srclen);
        }

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}
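
/*
 * Worked example of the wrap-around arithmetic above (illustrative
 * values): with ring_buffer_size = 4096, start_write_offset = 4000 and
 * srclen = 200, frag_len = 4096 - 4000 = 96, so 96 bytes land at
 * offsets 4000..4095 and the remaining 104 bytes land at offsets
 * 0..103. The returned offset is (4000 + 200) % 4096 = 104.
 * hv_copyfrom_ringbuffer() mirrors this on the read side.
 */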
/*
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}
/*
 * hv_get_ringbuffer_interrupt_mask()
 *
 * Get the interrupt mask for the specified ring buffer
 */
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
        return rbi->ring_buffer->interrupt_mask;
}
/*
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       void *buffer, u32 buflen)
{
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
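
/*
 * Layout note: the first PAGE_SIZE bytes of 'buffer' hold the
 * struct hv_ring_buffer control region (read/write indices and the
 * interrupt mask); the remaining buflen - PAGE_SIZE bytes are the data
 * area that the copy helpers above index into.
 */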
/*
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}
/*
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                        struct scatterlist *sglist, u32 sgcount)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;
        struct scatterlist *sg;
        u32 next_write_location;
        u64 prev_indices = 0;
        unsigned long flags;

        for_each_sg(sglist, sg, sgcount, i) {
                totalbytes_towrite += sg->length;
        }

        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(outring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /*
         * If there is only just enough room for the packet, treat the
         * ring as full: filling it completely would leave
         * read_index == write_index, which is indistinguishable from an
         * empty ring the next time around.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }
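
        /*
         * Example (illustrative values): with ring_datasize = 4096 and
         * read_index == write_index (empty ring), bytes_avail_towrite
         * is 4096. Writing exactly 4096 bytes would wrap write_index
         * back onto read_index, so the check above insists on strictly
         * more space than the packet needs, always leaving at least one
         * byte free.
         */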
        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        for_each_sg(sglist, sg, sgcount, i) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           sg_virt(sg),
                                                           sg->length);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Make sure we flush all writes before updating the write_index */
        smp_wmb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
        return 0;
}
/*
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        unsigned long flags;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Convert to byte offset */
        next_read_location = hv_get_next_read_location(inring_info);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    buflen,
                                                    next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);
        return 0;
}
/*
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
                       u32 buflen, u32 offset)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;

        if (buflen == 0)
                return -EINVAL;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    buflen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index,
         * since the writer may start writing to the read area once the
         * read index is updated.
         */
        smp_mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);
        return 0;
}
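
/*
 * Hypothetical usage sketch (not part of this file; 'channel', 'desc',
 * 'payload', 'pktlen' are illustrative names): a vmbus sender would
 * typically pack a packet descriptor and its payload into a scatterlist
 * and hand it to hv_ringbuffer_write(), while the receiver first peeks
 * the fixed-size descriptor and then reads the payload, passing the
 * descriptor size as 'offset' so the already-peeked bytes are skipped:
 *
 *      struct scatterlist sg[2];
 *
 *      sg_init_table(sg, 2);
 *      sg_set_buf(&sg[0], &desc, sizeof(desc));
 *      sg_set_buf(&sg[1], payload, payload_len);
 *      ret = hv_ringbuffer_write(&channel->outbound, sg, 2);
 *
 *      ret = hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
 *      ret = hv_ringbuffer_read(&channel->inbound, buf, pktlen,
 *                               sizeof(desc));
 */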