@@ -24,23 +24,13 @@ static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
 static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
 {
-	struct kvm_coalesced_mmio_zone *zone;
-	int i;
-
-	/* is it in a batchable area ? */
-
-	for (i = 0; i < dev->nb_zones; i++) {
-		zone = &dev->zone[i];
-
-		/* (addr,len) is fully included in
-		 * (zone->addr, zone->size)
-		 */
+	/* is it in a batchable area ?
+	 * (addr,len) is fully included in
+	 * (zone->addr, zone->size)
+	 */
 
-		if (zone->addr <= addr &&
-		    addr + len <= zone->addr + zone->size)
-			return 1;
-	}
-	return 0;
+	return (dev->zone.addr <= addr &&
+		addr + len <= dev->zone.addr + dev->zone.size);
 }
 
 static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
@@ -73,10 +63,10 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
-	spin_lock(&dev->lock);
+	spin_lock(&dev->kvm->ring_lock);
 
 	if (!coalesced_mmio_has_room(dev)) {
-		spin_unlock(&dev->lock);
+		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
 
@@ -87,7 +77,7 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
 	smp_wmb();
 	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
-	spin_unlock(&dev->lock);
+	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }
 
@@ -95,6 +85,8 @@ static void coalesced_mmio_destructor(struct kvm_io_device *this)
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 
+	list_del(&dev->list);
+
 	kfree(dev);
 }
 
@@ -105,7 +97,6 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
 
 int kvm_coalesced_mmio_init(struct kvm *kvm)
 {
-	struct kvm_coalesced_mmio_dev *dev;
 	struct page *page;
 	int ret;
 
@@ -113,31 +104,18 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page)
 		goto out_err;
-	kvm->coalesced_mmio_ring = page_address(page);
 
-	ret = -ENOMEM;
-	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
-	if (!dev)
-		goto out_free_page;
-	spin_lock_init(&dev->lock);
-	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
-	dev->kvm = kvm;
-	kvm->coalesced_mmio_dev = dev;
-
-	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
-	mutex_unlock(&kvm->slots_lock);
-	if (ret < 0)
-		goto out_free_dev;
+	ret = 0;
+	kvm->coalesced_mmio_ring = page_address(page);
 
-	return ret;
+	/*
+	 * We're using this spinlock to sync access to the coalesced ring.
+	 * The list doesn't need its own lock since device registration and
+	 * unregistration should only happen when kvm->slots_lock is held.
+	 */
+	spin_lock_init(&kvm->ring_lock);
+	INIT_LIST_HEAD(&kvm->coalesced_zones);
 
-out_free_dev:
-	kvm->coalesced_mmio_dev = NULL;
-	kfree(dev);
-out_free_page:
-	kvm->coalesced_mmio_ring = NULL;
-	__free_page(page);
 out_err:
 	return ret;
 }
@@ -151,51 +129,49 @@ void kvm_coalesced_mmio_free(struct kvm *kvm)
 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
 {
-	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+	int ret;
+	struct kvm_coalesced_mmio_dev *dev;
 
-	if (dev == NULL)
-		return -ENXIO;
+	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
+	dev->kvm = kvm;
+	dev->zone = *zone;
 
 	mutex_lock(&kvm->slots_lock);
-	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
-		mutex_unlock(&kvm->slots_lock);
-		return -ENOBUFS;
-	}
+	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+	if (ret < 0)
+		goto out_free_dev;
+	list_add_tail(&dev->list, &kvm->coalesced_zones);
+	mutex_unlock(&kvm->slots_lock);
 
-	dev->zone[dev->nb_zones] = *zone;
-	dev->nb_zones++;
+	return ret;
 
+out_free_dev:
 	mutex_unlock(&kvm->slots_lock);
+
+	kfree(dev);
+
+	if (dev == NULL)
+		return -ENXIO;
+
 	return 0;
 }
 
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
 {
-	int i;
-	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
-	struct kvm_coalesced_mmio_zone *z;
-
-	if (dev == NULL)
-		return -ENXIO;
+	struct kvm_coalesced_mmio_dev *dev, *tmp;
 
 	mutex_lock(&kvm->slots_lock);
 
-	i = dev->nb_zones;
-	while (i) {
-		z = &dev->zone[i - 1];
-
-		/* unregister all zones
-		 * included in (zone->addr, zone->size)
-		 */
-
-		if (zone->addr <= z->addr &&
-		    z->addr + z->size <= zone->addr + zone->size) {
-			dev->nb_zones--;
-			*z = dev->zone[dev->nb_zones];
+	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+			kvm_iodevice_destructor(&dev->dev);
 		}
-		i--;
-	}
 
 	mutex_unlock(&kvm->slots_lock);
 