/*
 * PCI Backend - Provides a Virtual PCI bus (with real devices)
 * to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "pciback.h"

#define PCI_SLOT_MAX 32
#define DRV_NAME "xen-pciback"

struct vpci_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list[PCI_SLOT_MAX];
	spinlock_t lock;
};
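
/*
 * Grab the first entry on a list. Callers hold vpci_dev->lock and only
 * use this on lists they have already checked are non-empty.
 */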
static inline struct list_head *list_first(struct list_head *head)
{
	return head->next;
}
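
/*
 * Translate a virtual (domain, bus, devfn) address back to the real
 * pci_dev. The virtual bus is always domain 0, bus 0; the virtual slot
 * indexes dev_list[] and the function number must match the real
 * device's. Returns NULL if nothing is exported at that address.
 */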
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	unsigned long flags;

	if (domain != 0 || bus != 0)
		return NULL;

	if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
		spin_lock_irqsave(&vpci_dev->lock, flags);

		list_for_each_entry(entry,
				    &vpci_dev->dev_list[PCI_SLOT(devfn)],
				    list) {
			if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
				dev = entry->dev;
				break;
			}
		}

		spin_unlock_irqrestore(&vpci_dev->lock, flags);
	}
	return dev;
}
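
/*
 * Two real devices belong in the same virtual slot iff they are
 * functions of the same physical device: same domain, same bus, same
 * real slot.
 */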
static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
{
	if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
	    && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
		return 1;

	return 0;
}
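
/*
 * Export @dev on the virtual bus. Sibling functions of an already
 * exported device are packed into that device's virtual slot so
 * multi-function devices stay intact; otherwise the first free slot is
 * taken. On success the device is published to the frontend through
 * @publish_cb as (domain 0, bus 0, PCI_DEVFN(slot, func)).
 */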
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
	int err = 0, slot, func = -1;
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	unsigned long flags;

	if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
		err = -EFAULT;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Can't export bridges on the virtual PCI bus");
		goto out;
	}

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry) {
		err = -ENOMEM;
		xenbus_dev_fatal(pdev->xdev, err,
				 "Error adding entry to virtual PCI bus");
		goto out;
	}

	dev_entry->dev = dev;

	spin_lock_irqsave(&vpci_dev->lock, flags);

	/* Keep multi-function devices together on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (!list_empty(&vpci_dev->dev_list[slot])) {
			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);

			if (match_slot(dev, t->dev)) {
				pr_info(DRV_NAME ": vpci: %s: assign to virtual slot %d func %d\n",
					pci_name(dev), slot,
					PCI_FUNC(dev->devfn));
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
				func = PCI_FUNC(dev->devfn);
				goto unlock;
			}
		}
	}

	/* Assign to a new slot on the virtual PCI bus */
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		if (list_empty(&vpci_dev->dev_list[slot])) {
			pr_info(DRV_NAME ": vpci: %s: assign to virtual slot %d\n",
				pci_name(dev), slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
			func = PCI_FUNC(dev->devfn);
			goto unlock;
		}
	}

	err = -ENOMEM;
	xenbus_dev_fatal(pdev->xdev, err,
			 "No more space on root virtual PCI bus");

unlock:
	spin_unlock_irqrestore(&vpci_dev->lock, flags);

	/* Publish this device. */
	if (!err)
		err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
	else
		kfree(dev_entry); /* never linked onto the bus; don't leak it */

out:
	return err;
}
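
/*
 * Unlink @dev from the virtual bus. The entry is removed and freed
 * under the lock; the reference held by pcistub is only dropped after
 * the lock is released.
 */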
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&vpci_dev->lock, flags);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;
		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			if (e->dev == dev) {
				list_del(&e->list);
				found_dev = e->dev;
				kfree(e);
				goto out;
			}
		}
	}

out:
	spin_unlock_irqrestore(&vpci_dev->lock, flags);

	if (found_dev)
		pcistub_put_pci_dev(found_dev);
}
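
/*
 * Allocate the per-backend virtual bus state: one (initially empty)
 * device list per virtual slot plus the lock protecting them.
 */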
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev;

	vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
	if (!vpci_dev)
		return -ENOMEM;

	spin_lock_init(&vpci_dev->lock);

	for (slot = 0; slot < PCI_SLOT_MAX; slot++)
		INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);

	pdev->pci_dev_data = vpci_dev;

	return 0;
}
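
/*
 * Report the root buses of the virtual topology to the frontend.
 */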
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_cb)
{
	/* The Virtual PCI bus has only one root */
	return publish_cb(pdev, 0, 0);
}
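
/*
 * Tear down the virtual bus: hand every exported device back to
 * pcistub and free the per-backend state. No lock is taken here; by
 * this point the backend is going away and no lookups should be in
 * flight.
 */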
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	int slot;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;

	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		struct pci_dev_entry *e, *tmp;
		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
					 list) {
			list_del(&e->list);
			pcistub_put_pci_dev(e->dev);
			kfree(e);
		}
	}

	kfree(vpci_dev);
	pdev->pci_dev_data = NULL;
}
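
/*
 * Reverse lookup: given the real @pcidev, recover the virtual
 * (domain, bus, devfn) the frontend sees. Returns 1 and fills the
 * out-parameters on success, 0 if the device is not exported.
 */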
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	struct pci_dev_entry *entry;
	struct pci_dev *dev = NULL;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
	unsigned long flags;
	int found = 0, slot;

	spin_lock_irqsave(&vpci_dev->lock, flags);
	for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
		list_for_each_entry(entry,
				    &vpci_dev->dev_list[slot],
				    list) {
			dev = entry->dev;
			if (dev && dev->bus->number == pcidev->bus->number
			    && pci_domain_nr(dev->bus) ==
					pci_domain_nr(pcidev->bus)
			    && dev->devfn == pcidev->devfn) {
				found = 1;
				*domain = 0;
				*bus = 0;
				*devfn = PCI_DEVFN(slot,
						   PCI_FUNC(pcidev->devfn));
			}
		}
	}
	spin_unlock_irqrestore(&vpci_dev->lock, flags);
	return found;
}
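
/*
 * Ops vector wiring the vpci model into the generic xen-pciback core
 * (the alternative passthrough model is selected with the driver's
 * "passthrough" module parameter).
 */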
struct xen_pcibk_backend xen_pcibk_vpci_backend = {
	.name		= "vpci",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};