passthrough.c

/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 * to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>

#include "pciback.h"

struct passthrough_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list;
	struct mutex lock;
};
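
/*
 * Look up a previously exported device by its (domain, bus, devfn) tuple.
 * Returns the matching pci_dev, or NULL if the device is not on dev_list.
 */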
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	struct pci_dev *dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
		    && bus == (unsigned int)dev_entry->dev->bus->number
		    && devfn == dev_entry->dev->devfn) {
			dev = dev_entry->dev;
			break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return dev;
}
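
/*
 * Add a device to this backend's dev_list and publish it to the frontend
 * via publish_cb under its real (domain, bus, devfn) address.
 */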
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev,
				   int devid, publish_pci_dev_cb publish_cb)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	unsigned int domain, bus, devfn;
	int err;

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry)
		return -ENOMEM;
	dev_entry->dev = dev;

	mutex_lock(&dev_data->lock);
	list_add_tail(&dev_entry->list, &dev_data->dev_list);
	mutex_unlock(&dev_data->lock);

	/* Publish this device. */
	domain = (unsigned int)pci_domain_nr(dev->bus);
	bus = (unsigned int)dev->bus->number;
	devfn = dev->devfn;
	err = publish_cb(pdev, domain, bus, devfn, devid);

	return err;
}
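
/*
 * Remove a device from dev_list and hand it back to pciback's stub driver.
 */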
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		if (dev_entry->dev == dev) {
			list_del(&dev_entry->list);
			found_dev = dev_entry->dev;
			kfree(dev_entry);
		}
	}

	mutex_unlock(&dev_data->lock);

	if (found_dev)
		pcistub_put_pci_dev(found_dev);
}
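
/*
 * Allocate and initialize the per-pdev passthrough state: an empty device
 * list and the mutex that protects it.
 */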
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data;

	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	mutex_init(&dev_data->lock);

	INIT_LIST_HEAD(&dev_data->dev_list);

	pdev->pci_dev_data = dev_data;

	return 0;
}
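
/*
 * Publish the root bus of each exported device, skipping devices whose
 * parent bridges are themselves exported (those are already reachable
 * from the bridge's root).
 */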
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_root_cb)
{
	int err = 0;
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *e;
	struct pci_dev *dev;
	int found;
	unsigned int domain, bus;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		/* Only publish this device as a root if none of its
		 * parent bridges are exported
		 */
		found = 0;

		dev = dev_entry->dev->bus->self;
		for (; !found && dev != NULL; dev = dev->bus->self) {
			list_for_each_entry(e, &dev_data->dev_list, list) {
				if (dev == e->dev) {
					found = 1;
					break;
				}
			}
		}

		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
		bus = (unsigned int)dev_entry->dev->bus->number;

		if (!found) {
			err = publish_root_cb(pdev, domain, bus);
			if (err)
				break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return err;
}
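
/*
 * Tear down the backend: return every device to the stub driver and free
 * the list entries along with the per-pdev state.
 */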
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		list_del(&dev_entry->list);
		pcistub_put_pci_dev(dev_entry->dev);
		kfree(dev_entry);
	}

	kfree(dev_data);
	pdev->pci_dev_data = NULL;
}
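
/*
 * Report the address the frontend should use for this device. Passthrough
 * exposes the real topology, so the host's (domain, bus, devfn) is returned
 * unchanged.
 */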
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	*domain = pci_domain_nr(pcidev->bus);
	*bus = pcidev->bus->number;
	*devfn = pcidev->devfn;
	return 1;
}
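
/* Backend operations used when xen-pciback runs in passthrough mode. */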
const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
	.name		= "passthrough",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};