vfio.c
/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

struct kvm_vfio_group {
	struct list_head node;
	struct vfio_group *vfio_group;
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};
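
/*
 * The vfio external-user API is resolved with symbol_get() rather than
 * called directly so that this code does not create a hard module
 * dependency on vfio; if the vfio module is not loaded, the lookup
 * simply fails and we bail out.
 */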
static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *vfio_group;
	struct vfio_group *(*fn)(struct file *);

	fn = symbol_get(vfio_group_get_external_user);
	if (!fn)
		return ERR_PTR(-EINVAL);

	vfio_group = fn(filep);

	symbol_put(vfio_group_get_external_user);

	return vfio_group;
}

static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
	void (*fn)(struct vfio_group *);

	fn = symbol_get(vfio_group_put_external_user);
	if (!fn)
		return;

	fn(vfio_group);

	symbol_put(vfio_group_put_external_user);
}

/*
 * Groups can use the same or different IOMMU domains. If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about. We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent. This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		/*
		 * TODO: We need an interface to check the coherency of
		 * the IOMMU domain this group is using. For now, assume
		 * it's always noncoherent.
		 */
		noncoherent = true;
		break;
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}
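
/*
 * KVM_DEV_VFIO_GROUP_ADD/DEL pass, via the attr address, a pointer to an
 * int32_t holding a VFIO group file descriptor. The fd is only held long
 * enough to take (or match) a vfio_group reference; it is that reference,
 * stored in the group list, which ties the group to this KVM instance.
 */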
static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
{
	struct kvm_vfio *kv = dev->private;
	struct vfio_group *vfio_group;
	struct kvm_vfio_group *kvg;
	void __user *argp = (void __user *)arg;
	struct fd f;
	int32_t fd;
	int ret;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, (int32_t __user *)argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group == vfio_group) {
				mutex_unlock(&kv->lock);
				kvm_vfio_group_put_external_user(vfio_group);
				return -EEXIST;
			}
		}

		kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
		if (!kvg) {
			mutex_unlock(&kv->lock);
			kvm_vfio_group_put_external_user(vfio_group);
			return -ENOMEM;
		}

		list_add_tail(&kvg->node, &kv->group_list);
		kvg->vfio_group = vfio_group;

		mutex_unlock(&kv->lock);

		kvm_vfio_update_coherency(dev);

		return 0;

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, (int32_t __user *)argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		ret = -ENOENT;

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group != vfio_group)
				continue;

			list_del(&kvg->node);
			kvm_vfio_group_put_external_user(kvg->vfio_group);
			kfree(kvg);
			ret = 0;
			break;
		}

		mutex_unlock(&kv->lock);

		kvm_vfio_group_put_external_user(vfio_group);

		kvm_vfio_update_coherency(dev);

		return ret;
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr, attr->addr);
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
			return 0;
		}

		break;
	}

	return -ENXIO;
}

static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
		kvm_vfio_group_put_external_user(kvg->vfio_group);
		list_del(&kvg->node);
		kfree(kvg);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};
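
/*
 * Usage sketch (userspace, for illustration only): roughly how a VFIO group
 * is expected to be wired into KVM through this device. It assumes the uapi
 * definitions from <linux/kvm.h> (KVM_CREATE_DEVICE, KVM_DEV_TYPE_VFIO,
 * KVM_SET_DEVICE_ATTR, KVM_DEV_VFIO_GROUP, KVM_DEV_VFIO_GROUP_ADD) and that
 * vm_fd and group_fd are already-open descriptors for the VM and the VFIO
 * group.
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	int32_t fd = group_fd;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr  = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr  = (uint64_t)(uintptr_t)&fd,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
 *		return -errno;
 *	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr))
 *		return -errno;
 */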