coalesced_mmio.c

/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */
#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"
static int coalesced_mmio_in_range(struct kvm_io_device *this,
                                   gpa_t addr, int len, int is_write)
{
        struct kvm_coalesced_mmio_dev *dev =
                        (struct kvm_coalesced_mmio_dev *)this->private;
        struct kvm_coalesced_mmio_zone *zone;
        int next;
        int i;

        if (!is_write)
                return 0;

        /* kvm->lock is taken by the caller and must not be released
         * before dev.read/write
         */

        /* Are we able to batch it? */

        /* last is the first free entry;
         * check that we do not run into the first used entry.
         * There is always one unused entry in the buffer.
         */
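        /* Example: with a ring of KVM_COALESCED_MMIO_MAX == 4 entries,
         * first == 1 and last == 0, next computes to 1 == first and the
         * ring is reported full even though slot 0 is still empty.
         * Sacrificing one slot lets first == last unambiguously mean
         * "empty" without a separate element count.
         */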
        next = (dev->kvm->coalesced_mmio_ring->last + 1) %
                                                KVM_COALESCED_MMIO_MAX;
        if (next == dev->kvm->coalesced_mmio_ring->first) {
                /* full */
                return 0;
        }

        /* is it in a batchable area? */

        for (i = 0; i < dev->nb_zones; i++) {
                zone = &dev->zone[i];

                /* (addr,len) is fully included in
                 * (zone->addr, zone->size)
                 */
                if (zone->addr <= addr &&
                    addr + len <= zone->addr + zone->size)
                        return 1;
        }
        return 0;
}
static void coalesced_mmio_write(struct kvm_io_device *this,
                                 gpa_t addr, int len, const void *val)
{
        struct kvm_coalesced_mmio_dev *dev =
                        (struct kvm_coalesced_mmio_dev *)this->private;
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

        /* kvm->lock must be taken by the caller before the call to
         * in_range()
         */

        /* copy data into the first free entry of the ring */
        ring->coalesced_mmio[ring->last].phys_addr = addr;
        ring->coalesced_mmio[ring->last].len = len;
        memcpy(ring->coalesced_mmio[ring->last].data, val, len);
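        /* Publish the entry only after its fields are globally visible:
         * the write barrier below orders the three stores above before
         * the store to ring->last that hands the entry to the consumer.
         */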
        smp_wmb();
        ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
}
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
        kfree(this);
}
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
        struct kvm_coalesced_mmio_dev *dev;

        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
        dev->dev.write      = coalesced_mmio_write;
        dev->dev.in_range   = coalesced_mmio_in_range;
        dev->dev.destructor = coalesced_mmio_destructor;
        dev->dev.private    = dev;
        dev->kvm = kvm;
        kvm->coalesced_mmio_dev = dev;
        kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);

        return 0;
}
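/* Note: the ring itself (kvm->coalesced_mmio_ring) is not allocated here;
 * in this era of the code it is a single zeroed page set up by the generic
 * VM-creation path and exposed to userspace by mmap()ing the vcpu fd at
 * KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE.
 */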
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
                                         struct kvm_coalesced_mmio_zone *zone)
{
        struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

        if (dev == NULL)
                return -EINVAL;

        mutex_lock(&kvm->lock);
        if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
                mutex_unlock(&kvm->lock);
                return -ENOBUFS;
        }

        dev->zone[dev->nb_zones] = *zone;
        dev->nb_zones++;

        mutex_unlock(&kvm->lock);
        return 0;
}
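/* Userspace reaches this through the KVM_REGISTER_COALESCED_MMIO vm ioctl.
 * A minimal sketch (vm_fd and the chosen zone are hypothetical):
 *
 *      struct kvm_coalesced_mmio_zone zone = {
 *              .addr = 0xa0000,        // legacy VGA framebuffer window
 *              .size = 0x20000,
 *      };
 *      if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
 *              perror("KVM_REGISTER_COALESCED_MMIO");
 */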
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                                           struct kvm_coalesced_mmio_zone *zone)
{
        int i;
        struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
        struct kvm_coalesced_mmio_zone *z;

        if (dev == NULL)
                return -EINVAL;

        mutex_lock(&kvm->lock);

        i = dev->nb_zones;
        while (i) {
                z = &dev->zone[i - 1];

                /* unregister all zones
                 * included in (zone->addr, zone->size)
                 */
                if (zone->addr <= z->addr &&
                    z->addr + z->size <= zone->addr + zone->size) {
                        dev->nb_zones--;
                        *z = dev->zone[dev->nb_zones];
                }
                i--;
        }

        mutex_unlock(&kvm->lock);

        return 0;
}
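
#if 0
/* Illustration only, not part of this file: a minimal sketch of the
 * userspace consumer that drains the ring after each KVM_RUN exit,
 * assuming the standard KVM ABI (struct kvm_coalesced_mmio_ring and
 * KVM_COALESCED_MMIO_MAX from <linux/kvm.h>). handle_mmio_write() is a
 * hypothetical VMM callback, and __sync_synchronize() stands in for a
 * proper userspace read barrier.
 */
#include <linux/kvm.h>

void drain_coalesced_ring(struct kvm_coalesced_mmio_ring *ring,
                          void (*handle_mmio_write)(__u64 addr,
                                                    const void *data,
                                                    __u32 len))
{
        /* first == last means the ring is empty */
        while (ring->first != ring->last) {
                struct kvm_coalesced_mmio *ent =
                                &ring->coalesced_mmio[ring->first];

                handle_mmio_write(ent->phys_addr, ent->data, ent->len);

                /* finish reading the entry before releasing its slot */
                __sync_synchronize();
                ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
}
#endif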