passthrough.c

/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 *               to the frontend
 *
 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"

struct passthrough_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list;
	spinlock_t lock;
};
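
/* Look up the exported pci_dev that matches domain:bus:devfn in the
 * passthrough device list. Returns the device, or NULL if it is not
 * exported by this backend instance.
 */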
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	struct pci_dev *dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_data->lock, flags);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
		    && bus == (unsigned int)dev_entry->dev->bus->number
		    && devfn == dev_entry->dev->devfn) {
			dev = dev_entry->dev;
			break;
		}
	}

	spin_unlock_irqrestore(&dev_data->lock, flags);

	return dev;
}
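
/* Add a device to the passthrough list and publish it to the frontend
 * via the supplied callback. Returns 0 on success or a negative errno.
 */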
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev,
				   int devid, publish_pci_dev_cb publish_cb)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	unsigned long flags;
	unsigned int domain, bus, devfn;
	int err;

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry)
		return -ENOMEM;
	dev_entry->dev = dev;

	spin_lock_irqsave(&dev_data->lock, flags);
	list_add_tail(&dev_entry->list, &dev_data->dev_list);
	spin_unlock_irqrestore(&dev_data->lock, flags);

	/* Publish this device. */
	domain = (unsigned int)pci_domain_nr(dev->bus);
	bus = (unsigned int)dev->bus->number;
	devfn = dev->devfn;
	err = publish_cb(pdev, domain, bus, devfn, devid);

	return err;
}
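
/* Remove a device from the passthrough list and, once it has been
 * unlinked, hand it back to the pciback stub driver.
 */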
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_data->lock, flags);

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		if (dev_entry->dev == dev) {
			list_del(&dev_entry->list);
			found_dev = dev_entry->dev;
			kfree(dev_entry);
		}
	}

	spin_unlock_irqrestore(&dev_data->lock, flags);

	if (found_dev)
		pcistub_put_pci_dev(found_dev);
}
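
/* Allocate and initialise the per-pdev passthrough state: the device
 * list and the lock that protects it.
 */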
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data;

	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	spin_lock_init(&dev_data->lock);

	INIT_LIST_HEAD(&dev_data->dev_list);

	pdev->pci_dev_data = dev_data;

	return 0;
}
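
/* Publish each exported device's bus as a PCI root, skipping devices
 * whose parent bridges are themselves exported, so that only the
 * topmost exported buses are reported as roots.
 */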
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_root_cb)
{
	int err = 0;
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *e, *tmp;
	struct pci_dev *dev;
	int found;
	unsigned int domain, bus;

	spin_lock(&dev_data->lock);

	list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) {
		/* Only publish this device as a root if none of its
		 * parent bridges are exported
		 */
		found = 0;
		dev = dev_entry->dev->bus->self;
		for (; !found && dev != NULL; dev = dev->bus->self) {
			list_for_each_entry(e, &dev_data->dev_list, list) {
				if (dev == e->dev) {
					found = 1;
					break;
				}
			}
		}

		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
		bus = (unsigned int)dev_entry->dev->bus->number;

		if (!found) {
			/* Drop the spinlock around the publish callback
			 * (which may block); on failure, break out with
			 * the lock already released.
			 */
			spin_unlock(&dev_data->lock);
			err = publish_root_cb(pdev, domain, bus);
			if (err)
				break;
			spin_lock(&dev_data->lock);
		}
	}

	/* On the error path the lock was dropped before the break. */
	if (!err)
		spin_unlock(&dev_data->lock);

	return err;
}
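
/* Tear down the passthrough state: return every device to the pciback
 * stub driver and free the list entries and the per-pdev data.
 */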
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		list_del(&dev_entry->list);
		pcistub_put_pci_dev(dev_entry->dev);
		kfree(dev_entry);
	}

	kfree(dev_data);
	pdev->pci_dev_data = NULL;
}
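
/* In passthrough mode the frontend sees the real topology, so report
 * the device's real domain, bus and devfn. Always returns 1 (found).
 */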
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	*domain = pci_domain_nr(pcidev->bus);
	*bus = pcidev->bus->number;
	*devfn = pcidev->devfn;
	return 1;
}
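
/* Backend operations table used by the xen-pciback core when the
 * passthrough (non-virtualised topology) backend is selected.
 */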
struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
	.name		= "passthrough",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};