xen.c

/*
 * Xen PCI Frontend Stub - puts some "dummy" functions into the Linux
 * x86 PCI core to support the Xen PCI Frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include <linux/io.h>
#include <asm/io_apic.h>
#include <asm/pci_x86.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/events.h>
#include <asm/xen/pci.h>

#ifdef CONFIG_ACPI
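/*
 * Ask Xen to map the given GSI to a pirq for an HVM guest: fill in a
 * physdev_map_pirq request, then hand the resulting pirq to
 * xen_map_pirq_gsi() so it gets bound to a Linux irq.
 */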
static int xen_hvm_register_pirq(u32 gsi, int triggering)
{
        int rc, irq;
        struct physdev_map_pirq map_irq;
        int shareable = 0;
        char *name;

        if (!xen_hvm_domain())
                return -1;

        map_irq.domid = DOMID_SELF;
        map_irq.type = MAP_PIRQ_TYPE_GSI;
        map_irq.index = gsi;
        map_irq.pirq = -1;

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
        if (rc) {
                printk(KERN_WARNING "xen map irq failed %d\n", rc);
                return -1;
        }

        if (triggering == ACPI_EDGE_SENSITIVE) {
                shareable = 0;
                name = "ioapic-edge";
        } else {
                shareable = 1;
                name = "ioapic-level";
        }

        irq = xen_map_pirq_gsi(map_irq.pirq, gsi, shareable, name);

        printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

        return irq;
}
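
/* ACPI GSI registration hook installed when running as a Xen HVM guest. */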
static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
                                     int trigger, int polarity)
{
        return xen_hvm_register_pirq(gsi, trigger);
}
#endif

#if defined(CONFIG_PCI_MSI)
#include <linux/msi.h>
#include <asm/msidef.h>

struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);

static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
                                struct msi_msg *msg)
{
        /* We set vector == 0 to tell the hypervisor we don't care about it,
         * but we want a pirq setup instead.
         * We use the dest_id field to pass the pirq that we want. */
        msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq);
        msg->address_lo =
                MSI_ADDR_BASE_LO |
                MSI_ADDR_DEST_MODE_PHYSICAL |
                MSI_ADDR_REDIRECTION_CPU |
                MSI_ADDR_DEST_ID(pirq);
        msg->data =
                MSI_DATA_TRIGGER_EDGE |
                MSI_DATA_LEVEL_ASSERT |
                /* delivery mode reserved */
                (3 << 8) |
                MSI_DATA_VECTOR(0);
}
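
/*
 * MSI/MSI-X setup for HVM guests: for every msi_desc on the device,
 * allocate an irq/pirq pair, encode the pirq into the MSI message and
 * write the message to the device.
 */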
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        int irq, pirq, ret = 0;
        struct msi_desc *msidesc;
        struct msi_msg msg;

        list_for_each_entry(msidesc, &dev->msi_list, list) {
                xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
                                      "msi-x" : "msi", &irq, &pirq);
                if (irq < 0 || pirq < 0)
                        goto error;
                printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq);
                xen_msi_compose_msg(dev, pirq, &msg);
                ret = set_irq_msi(irq, msidesc);
                if (ret < 0)
                        goto error_while;
                write_msi_msg(irq, &msg);
        }
        return 0;

error_while:
        unbind_from_irqhandler(irq, NULL);
error:
        if (ret == -ENODEV)
                dev_err(&dev->dev, "Xen PCI frontend has not registered" \
                        " MSI/MSI-X support!\n");

        return ret;
}

/*
 * For MSI interrupts we have to use the drivers/xen/events.c functions to
 * allocate an irq_desc and set up the right pirq binding for it.
 */
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        int irq, ret, i;
        struct msi_desc *msidesc;
        int *v;

        v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
        if (!v)
                return -ENOMEM;

        if (type == PCI_CAP_ID_MSIX)
                ret = xen_pci_frontend_enable_msix(dev, &v, nvec);
        else
                ret = xen_pci_frontend_enable_msi(dev, &v);
        if (ret)
                goto error;

        i = 0;
        list_for_each_entry(msidesc, &dev->msi_list, list) {
                irq = xen_allocate_pirq(v[i], 0, /* not sharable */
                                        (type == PCI_CAP_ID_MSIX) ?
                                        "pcifront-msi-x" : "pcifront-msi");
                if (irq < 0) {
                        ret = -1;
                        goto free;
                }

                ret = set_irq_msi(irq, msidesc);
                if (ret)
                        goto error_while;
                i++;
        }
        kfree(v);
        return 0;

error_while:
        unbind_from_irqhandler(irq, NULL);
error:
        if (ret == -ENODEV)
                dev_err(&dev->dev, "Xen PCI frontend has not registered" \
                        " MSI/MSI-X support!\n");
free:
        kfree(v);
        return ret;
}
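
/*
 * Tear down MSI/MSI-X for a pcifront device: look at the first msi_desc
 * to decide whether MSI or MSI-X was enabled and ask the frontend to
 * disable it.
 */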
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
        struct msi_desc *msidesc;

        msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
        if (msidesc->msi_attrib.is_msix)
                xen_pci_frontend_disable_msix(dev);
        else
                xen_pci_frontend_disable_msi(dev);
}

static void xen_teardown_msi_irq(unsigned int irq)
{
        xen_destroy_irq(irq);
}
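
/*
 * MSI/MSI-X setup used by the initial domain (dom0): create an MSI irq
 * for each msi_desc directly via xen_create_msi_irq().
 */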
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        int irq, ret;
        struct msi_desc *msidesc;

        list_for_each_entry(msidesc, &dev->msi_list, list) {
                irq = xen_create_msi_irq(dev, msidesc, type);
                if (irq < 0)
                        return -1;

                ret = set_irq_msi(irq, msidesc);
                if (ret)
                        goto error;
        }
        return 0;

error:
        xen_destroy_irq(irq);
        return ret;
}
#endif
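
/*
 * pcibios_enable_irq hook for PV guests: bind the device's legacy irq
 * to a pirq; irqs below NR_IRQS_LEGACY are treated as non-shareable.
 */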
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
        int rc;
        int share = 1;

        dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq);

        if (dev->irq < 0)
                return -EINVAL;

        if (dev->irq < NR_IRQS_LEGACY)
                share = 0;

        rc = xen_allocate_pirq(dev->irq, share, "pcifront");
        if (rc < 0) {
                dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n",
                         dev->irq, rc);
                return rc;
        }

        return 0;
}
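
/*
 * Set up the Xen PCI frontend stub for PV domU guests: install the
 * pcibios irq hooks and the Xen MSI ops, and keep ACPI out of irq
 * routing.
 */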
int __init pci_xen_init(void)
{
        if (!xen_pv_domain() || xen_initial_domain())
                return -ENODEV;

        printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");

        pcibios_set_cache_line_size();

        pcibios_enable_irq = xen_pcifront_enable_irq;
        pcibios_disable_irq = NULL;

#ifdef CONFIG_ACPI
        /* Keep ACPI out of the picture */
        acpi_noirq = 1;
#endif

#ifdef CONFIG_PCI_MSI
        x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
        x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
        x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
#endif
        return 0;
}
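
/*
 * HVM guest setup: if the hypervisor advertises XENFEAT_hvm_pirqs,
 * route GSI registration and MSI setup through the Xen-specific hooks.
 */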
int __init pci_xen_hvm_init(void)
{
        if (!xen_feature(XENFEAT_hvm_pirqs))
                return 0;

#ifdef CONFIG_ACPI
        /*
         * We don't want to change the actual ACPI delivery model,
         * just how GSIs get registered.
         */
        __acpi_register_gsi = acpi_register_gsi_xen_hvm;
#endif

#ifdef CONFIG_PCI_MSI
        x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
        x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
#endif
        return 0;
}

#ifdef CONFIG_XEN_DOM0
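/*
 * dom0 path: allocate a pirq for the GSI via xen_allocate_pirq() and
 * then ask Xen to map that GSI to the pirq with PHYSDEVOP_map_pirq.
 */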
static int xen_register_pirq(u32 gsi, int triggering)
{
        int rc, irq;
        struct physdev_map_pirq map_irq;
        int shareable = 0;
        char *name;

        if (!xen_pv_domain())
                return -1;

        if (triggering == ACPI_EDGE_SENSITIVE) {
                shareable = 0;
                name = "ioapic-edge";
        } else {
                shareable = 1;
                name = "ioapic-level";
        }

        irq = xen_allocate_pirq(gsi, shareable, name);

        printk(KERN_DEBUG "xen: --> irq=%d\n", irq);

        if (irq < 0)
                goto out;

        map_irq.domid = DOMID_SELF;
        map_irq.type = MAP_PIRQ_TYPE_GSI;
        map_irq.index = gsi;
        map_irq.pirq = irq;

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
        if (rc) {
                printk(KERN_WARNING "xen map irq failed %d\n", rc);
                return -1;
        }

out:
        return irq;
}
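
/*
 * Register a GSI for dom0: map it to a pirq and tell Xen its trigger
 * mode and polarity via PHYSDEVOP_setup_gsi.
 */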
static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
        int rc, irq;
        struct physdev_setup_gsi setup_gsi;

        if (!xen_pv_domain())
                return -1;

        printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
               gsi, triggering, polarity);

        irq = xen_register_pirq(gsi, triggering);

        setup_gsi.gsi = gsi;
        setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
        setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
        if (rc == -EEXIST)
                printk(KERN_INFO "GSI %d is already set up\n", gsi);
        else if (rc) {
                printk(KERN_ERR "Failed to set up GSI %d, err_code: %d\n",
                       gsi, rc);
        }

        return irq;
}
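
/*
 * Register the ACPI SCI (the interrupt ACPI itself uses) with Xen,
 * taking any interrupt source override for it into account.
 */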
static __init void xen_setup_acpi_sci(void)
{
        int rc;
        int trigger, polarity;
        int gsi = acpi_sci_override_gsi;

        if (!gsi)
                return;

        rc = acpi_get_override_irq(gsi, &trigger, &polarity);
        if (rc) {
                printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
                       " sci, rc=%d\n", rc);
                return;
        }
        trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
        polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;

        printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
               "polarity=%d\n", gsi, trigger, polarity);

        gsi = xen_register_gsi(gsi, trigger, polarity);
        printk(KERN_INFO "xen: acpi sci %d\n", gsi);

        return;
}
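
/* ACPI GSI registration hook installed for the initial domain (dom0). */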
static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
                                 int trigger, int polarity)
{
        return xen_register_gsi(gsi, trigger, polarity);
}
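
/*
 * dom0 initialisation: install the dom0 MSI ops, register the ACPI SCI
 * with Xen and take over GSI registration.
 */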
static int __init pci_xen_initial_domain(void)
{
#ifdef CONFIG_PCI_MSI
        x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
        x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
#endif
        xen_setup_acpi_sci();
        __acpi_register_gsi = acpi_register_gsi_xen;

        return 0;
}
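
/*
 * Pre-allocate pirqs for the legacy irq range in dom0.  Without any
 * IO-APICs the legacy irqs are simply bound as "xt-pic"; otherwise each
 * legacy irq for which ACPI reports a trigger mode is registered as a
 * GSI pirq.
 */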
void __init xen_setup_pirqs(void)
{
        int irq;

        pci_xen_initial_domain();

        if (0 == nr_ioapics) {
                for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
                        xen_allocate_pirq(irq, 0, "xt-pic");
                return;
        }

        /* Pre-allocate legacy irqs */
        for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
                int trigger, polarity;

                if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
                        continue;

                xen_register_pirq(irq,
                        trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
        }
}
#endif