xen.c

/*
 * Xen PCI Frontend Stub - puts some "dummy" functions into the Linux
 * x86 PCI core to support the Xen PCI Frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include <linux/io.h>
#include <asm/pci_x86.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/events.h>
#include <asm/xen/pci.h>

#ifdef CONFIG_ACPI
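/*
 * Ask Xen to map the GSI to a pirq via PHYSDEVOP_map_pirq, then bind that
 * pirq to a Linux irq with xen_map_pirq_gsi(). Edge-triggered GSIs are
 * registered as non-shareable, level-triggered ones as shareable.
 */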
static int xen_hvm_register_pirq(u32 gsi, int triggering)
{
        int rc, irq;
        struct physdev_map_pirq map_irq;
        int shareable = 0;
        char *name;

        if (!xen_hvm_domain())
                return -1;

        map_irq.domid = DOMID_SELF;
        map_irq.type = MAP_PIRQ_TYPE_GSI;
        map_irq.index = gsi;
        map_irq.pirq = -1;

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
        if (rc) {
                printk(KERN_WARNING "xen map irq failed %d\n", rc);
                return -1;
        }

        if (triggering == ACPI_EDGE_SENSITIVE) {
                shareable = 0;
                name = "ioapic-edge";
        } else {
                shareable = 1;
                name = "ioapic-level";
        }

        irq = xen_map_pirq_gsi(map_irq.pirq, gsi, shareable, name);

        printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

        return irq;
}
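
/* Thin wrapper with the signature expected by the __acpi_register_gsi hook. */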
static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
                                     int trigger, int polarity)
{
        return xen_hvm_register_pirq(gsi, trigger);
}
#endif

#if defined(CONFIG_PCI_MSI)
#include <linux/msi.h>
#include <asm/msidef.h>
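
/*
 * Ops supplied by the Xen PCI frontend driver; the setup/teardown paths
 * below use them to enable and disable MSI/MSI-X for an unprivileged domain.
 */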
struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);

static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
                                struct msi_msg *msg)
{
        /* We set vector == 0 to tell the hypervisor we don't care about it,
         * but we want a pirq setup instead.
         * We use the dest_id field to pass the pirq that we want. */
        msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq);
        msg->address_lo =
                MSI_ADDR_BASE_LO |
                MSI_ADDR_DEST_MODE_PHYSICAL |
                MSI_ADDR_REDIRECTION_CPU |
                MSI_ADDR_DEST_ID(pirq);
        msg->data =
                MSI_DATA_TRIGGER_EDGE |
                MSI_DATA_LEVEL_ASSERT |
                /* delivery mode reserved */
                (3 << 8) |
                MSI_DATA_VECTOR(0);
}
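
/*
 * HVM path: allocate an irq and a pirq for each MSI descriptor, then write
 * an MSI message that carries the pirq in the destination ID field (see
 * xen_msi_compose_msg() above).
 */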
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        int irq, pirq, ret = 0;
        struct msi_desc *msidesc;
        struct msi_msg msg;

        list_for_each_entry(msidesc, &dev->msi_list, list) {
                xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
                                      "msi-x" : "msi", &irq, &pirq);
                if (irq < 0 || pirq < 0)
                        goto error;
                printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq);
                xen_msi_compose_msg(dev, pirq, &msg);
                ret = set_irq_msi(irq, msidesc);
                if (ret < 0)
                        goto error_while;
                write_msi_msg(irq, &msg);
        }
        return 0;

error_while:
        unbind_from_irqhandler(irq, NULL);
error:
        if (ret == -ENODEV)
                dev_err(&dev->dev, "Xen PCI frontend has not registered"
                        " MSI/MSI-X support!\n");
        return ret;
}

/*
 * For MSI interrupts we have to use the drivers/xen/events.c functions to
 * allocate an irq_desc and set it up correctly.
 */
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        int irq, ret, i;
        struct msi_desc *msidesc;
        int *v;

        v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
        if (!v)
                return -ENOMEM;

        if (!xen_initial_domain()) {
                if (type == PCI_CAP_ID_MSIX)
                        ret = xen_pci_frontend_enable_msix(dev, &v, nvec);
                else
                        ret = xen_pci_frontend_enable_msi(dev, &v);
                if (ret)
                        goto error;
        }
        i = 0;
        list_for_each_entry(msidesc, &dev->msi_list, list) {
                irq = xen_allocate_pirq(v[i], 0, /* not sharable */
                                        (type == PCI_CAP_ID_MSIX) ?
                                        "pcifront-msi-x" : "pcifront-msi");
                if (irq < 0) {
                        /* don't leak the vector array on failure */
                        kfree(v);
                        return -1;
                }
                ret = set_irq_msi(irq, msidesc);
                if (ret)
                        goto error_while;
                i++;
        }
        kfree(v);
        return 0;

error_while:
        unbind_from_irqhandler(irq, NULL);
error:
        if (ret == -ENODEV)
                dev_err(&dev->dev, "Xen PCI frontend has not registered"
                        " MSI/MSI-X support!\n");
        kfree(v);
        return ret;
}
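
/* When running unprivileged, ask the PCI frontend to disable MSI/MSI-X. */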
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
        /* Only do this when we are in non-privileged mode. */
        if (!xen_initial_domain()) {
                struct msi_desc *msidesc;

                msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
                if (msidesc->msi_attrib.is_msix)
                        xen_pci_frontend_disable_msix(dev);
                else
                        xen_pci_frontend_disable_msi(dev);
        }
}

static void xen_teardown_msi_irq(unsigned int irq)
{
        xen_destroy_irq(irq);
}
#endif
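
/*
 * Legacy (INTx) interrupt path: register the device's irq with Xen as a
 * pirq. IRQs below NR_IRQS_LEGACY are treated as non-shareable.
 */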
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
        int rc;
        int share = 1;

        dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq);

        if (dev->irq < 0)
                return -EINVAL;

        if (dev->irq < NR_IRQS_LEGACY)
                share = 0;

        rc = xen_allocate_pirq(dev->irq, share, "pcifront");
        if (rc < 0) {
                dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n",
                         dev->irq, rc);
                return rc;
        }
        return 0;
}
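
/*
 * Install the frontend stubs for a PV domU: take over the pcibios IRQ hooks,
 * keep ACPI out of IRQ routing and, with CONFIG_PCI_MSI, route MSI setup and
 * teardown through the Xen-specific handlers above.
 */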
int __init pci_xen_init(void)
{
        if (!xen_pv_domain() || xen_initial_domain())
                return -ENODEV;

        printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");

        pcibios_set_cache_line_size();

        pcibios_enable_irq = xen_pcifront_enable_irq;
        pcibios_disable_irq = NULL;

#ifdef CONFIG_ACPI
        /* Keep ACPI out of the picture */
        acpi_noirq = 1;
#endif

#ifdef CONFIG_PCI_MSI
        x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
        x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
        x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
#endif
        return 0;
}
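
/*
 * HVM guests that advertise XENFEAT_hvm_pirqs keep the normal ACPI delivery
 * model but register GSIs (and MSIs) through Xen pirqs.
 */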
int __init pci_xen_hvm_init(void)
{
        if (!xen_feature(XENFEAT_hvm_pirqs))
                return 0;

#ifdef CONFIG_ACPI
        /*
         * We don't want to change the actual ACPI delivery model,
         * just how GSIs get registered.
         */
        __acpi_register_gsi = acpi_register_gsi_xen_hvm;
#endif

#ifdef CONFIG_PCI_MSI
        x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
        x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
#endif
        return 0;
}

#ifdef CONFIG_XEN_DOM0
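/*
 * Dom0 variant: allocate the pirq with xen_allocate_pirq() first and then
 * tell Xen about the GSI <-> pirq mapping via PHYSDEVOP_map_pirq.
 */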
static int xen_register_pirq(u32 gsi, int triggering)
{
        int rc, irq;
        struct physdev_map_pirq map_irq;
        int shareable = 0;
        char *name;

        if (!xen_pv_domain())
                return -1;

        if (triggering == ACPI_EDGE_SENSITIVE) {
                shareable = 0;
                name = "ioapic-edge";
        } else {
                shareable = 1;
                name = "ioapic-level";
        }

        irq = xen_allocate_pirq(gsi, shareable, name);

        printk(KERN_DEBUG "xen: --> irq=%d\n", irq);

        if (irq < 0)
                goto out;

        map_irq.domid = DOMID_SELF;
        map_irq.type = MAP_PIRQ_TYPE_GSI;
        map_irq.index = gsi;
        map_irq.pirq = irq;

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
        if (rc) {
                printk(KERN_WARNING "xen map irq failed %d\n", rc);
                return -1;
        }

out:
        return irq;
}
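
/*
 * Register a GSI with Xen: map it to a pirq and hand its trigger mode and
 * polarity to the hypervisor via PHYSDEVOP_setup_gsi.
 */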
static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
        int rc, irq;
        struct physdev_setup_gsi setup_gsi;

        if (!xen_pv_domain())
                return -1;

        printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
               gsi, triggering, polarity);

        irq = xen_register_pirq(gsi, triggering);

        setup_gsi.gsi = gsi;
        setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
        setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
        if (rc == -EEXIST)
                printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
        else if (rc) {
                printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
                       gsi, rc);
        }

        return irq;
}
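
/*
 * Register the ACPI SCI with Xen; acpi_sci_override_gsi and
 * acpi_get_override_irq() supply the GSI, trigger mode and polarity to use.
 */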
static __init void xen_setup_acpi_sci(void)
{
        int rc;
        int trigger, polarity;
        int gsi = acpi_sci_override_gsi;

        if (!gsi)
                return;

        rc = acpi_get_override_irq(gsi, &trigger, &polarity);
        if (rc) {
                printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
                       " sci, rc=%d\n", rc);
                return;
        }
        trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
        polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;

        printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
               "polarity=%d\n", gsi, trigger, polarity);

        gsi = xen_register_gsi(gsi, trigger, polarity);
        printk(KERN_INFO "xen: acpi sci %d\n", gsi);

        return;
}

static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
                                 int trigger, int polarity)
{
        return xen_register_gsi(gsi, trigger, polarity);
}

static int __init pci_xen_initial_domain(void)
{
        xen_setup_acpi_sci();
        __acpi_register_gsi = acpi_register_gsi_xen;

        return 0;
}
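
/*
 * Pre-allocate pirqs for the legacy IRQs: without an IO-APIC they are bound
 * as "xt-pic", otherwise every legacy IRQ known to ACPI is registered via
 * xen_register_pirq().
 */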
void __init xen_setup_pirqs(void)
{
        int irq;

        pci_xen_initial_domain();

        if (0 == nr_ioapics) {
                for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
                        xen_allocate_pirq(irq, 0, "xt-pic");
                return;
        }

        /* Pre-allocate legacy irqs */
        for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
                int trigger, polarity;

                if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
                        continue;

                xen_register_pirq(irq,
                        trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
        }
}
#endif