pciback_ops.c

/*
 * PCI Backend Operations - respond to PCI requests from Frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <xen/events.h>
#include <linux/sched.h>
#include "pciback.h"

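/* When non-zero, every request serviced for the frontend is logged at
 * KERN_DEBUG. Exposed as a writable module parameter (mode 0644) so it can
 * be toggled at runtime.
 */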
int verbose_request;
module_param(verbose_request, int, 0644);

static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id);

/* Ensure a device has the fake IRQ handler "turned on/off" and is
 * ready to be exported. This MUST be run after xen_pcibk_reset_device,
 * which does the actual PCI device enable/disable.
 */
static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
{
        struct xen_pcibk_dev_data *dev_data;
        int rc;
        int enable = 0;

        dev_data = pci_get_drvdata(dev);
        if (!dev_data)
                return;

        /* We don't deal with bridges */
        if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
                return;

        if (reset) {
                dev_data->enable_intx = 0;
                dev_data->ack_intr = 0;
        }
        enable = dev_data->enable_intx;

        /* Asked to disable, but ISR isn't running */
        if (!enable && !dev_data->isr_on)
                return;

        /* Squirrel away the IRQs in the dev_data. We need this
         * b/c when device transitions to MSI, the dev->irq is
         * overwritten with the MSI vector.
         */
        if (enable)
                dev_data->irq = dev->irq;

        /*
         * SR-IOV devices all use MSI-X and have no legacy
         * interrupts, so inhibit creating a fake IRQ handler for them.
         */
        if (dev_data->irq == 0)
                goto out;

        dev_dbg(&dev->dev, "%s: #%d %s %s%s %s-> %s\n",
                dev_data->irq_name,
                dev_data->irq,
                pci_is_enabled(dev) ? "on" : "off",
                dev->msi_enabled ? "MSI" : "",
                dev->msix_enabled ? "MSI/X" : "",
                dev_data->isr_on ? "enable" : "disable",
                enable ? "enable" : "disable");

        if (enable) {
                rc = request_irq(dev_data->irq,
                                 xen_pcibk_guest_interrupt, IRQF_SHARED,
                                 dev_data->irq_name, dev);
                if (rc) {
                        dev_err(&dev->dev, "%s: failed to install fake IRQ " \
                                "handler for IRQ %d! (rc:%d)\n",
                                dev_data->irq_name, dev_data->irq, rc);
                        goto out;
                }
        } else {
                free_irq(dev_data->irq, dev);
                dev_data->irq = 0;
        }
        dev_data->isr_on = enable;
        dev_data->ack_intr = enable;
out:
        dev_dbg(&dev->dev, "%s: #%d %s %s%s %s\n",
                dev_data->irq_name,
                dev_data->irq,
                pci_is_enabled(dev) ? "on" : "off",
                dev->msi_enabled ? "MSI" : "",
                dev->msix_enabled ? "MSI/X" : "",
                enable ? (dev_data->isr_on ? "enabled" : "failed to enable") :
                        (dev_data->isr_on ? "failed to disable" : "disabled"));
}

/* Ensure a device is "turned off" and ready to be exported.
 * (Also see xen_pcibk_config_reset to ensure virtual configuration space is
 * ready to be re-exported)
 */
void xen_pcibk_reset_device(struct pci_dev *dev)
{
        u16 cmd;

        xen_pcibk_control_isr(dev, 1 /* reset device */);

        /* Disable devices (but not bridges) */
        if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
#ifdef CONFIG_PCI_MSI
                /* The guest could have been abruptly killed without
                 * disabling MSI/MSI-X interrupts. */
                if (dev->msix_enabled)
                        pci_disable_msix(dev);
                if (dev->msi_enabled)
                        pci_disable_msi(dev);
#endif
                if (pci_is_enabled(dev))
                        pci_disable_device(dev);

                pci_write_config_word(dev, PCI_COMMAND, 0);
                dev->is_busmaster = 0;
        } else {
                pci_read_config_word(dev, PCI_COMMAND, &cmd);
                if (cmd & (PCI_COMMAND_INVALIDATE)) {
                        cmd &= ~(PCI_COMMAND_INVALIDATE);
                        pci_write_config_word(dev, PCI_COMMAND, cmd);
                        dev->is_busmaster = 0;
                }
        }
}

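/*
 * The MSI/MSI-X helpers below service the corresponding XEN_PCI_OP_*
 * requests from the frontend. Each one reports back, via op->value, either
 * the Xen pirq the guest should bind to or (for MSI-X enable) the result of
 * pci_enable_msix(); the per-vector pirqs for MSI-X are returned in
 * op->msix_entries[].
 */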
#ifdef CONFIG_PCI_MSI
static
int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
                         struct pci_dev *dev, struct xen_pci_op *op)
{
        struct xen_pcibk_dev_data *dev_data;
        int status;

        if (unlikely(verbose_request))
                printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));

        status = pci_enable_msi(dev);

        if (status) {
                pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
                                    pci_name(dev), pdev->xdev->otherend_id,
                                    status);
                op->value = 0;
                return XEN_PCI_ERR_op_failed;
        }

        /* The value the guest needs is actually the IDT vector, not
         * the local domain's IRQ number. */
        op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
        if (unlikely(verbose_request))
                printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
                        op->value);

        dev_data = pci_get_drvdata(dev);
        if (dev_data)
                dev_data->ack_intr = 0;

        return 0;
}

static
int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
                          struct pci_dev *dev, struct xen_pci_op *op)
{
        struct xen_pcibk_dev_data *dev_data;

        if (unlikely(verbose_request))
                printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
                       pci_name(dev));
        pci_disable_msi(dev);

        op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
        if (unlikely(verbose_request))
                printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
                        op->value);

        dev_data = pci_get_drvdata(dev);
        if (dev_data)
                dev_data->ack_intr = 1;
        return 0;
}

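/* Enable MSI-X with the vector table supplied by the frontend in
 * op->msix_entries[] (at most SH_INFO_MAX_VEC entries), then translate each
 * allocated vector into a Xen pirq and write it back for the guest to use.
 */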
static
int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
                          struct pci_dev *dev, struct xen_pci_op *op)
{
        struct xen_pcibk_dev_data *dev_data;
        int i, result;
        struct msix_entry *entries;

        if (unlikely(verbose_request))
                printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
                       pci_name(dev));
        if (op->value > SH_INFO_MAX_VEC)
                return -EINVAL;

        entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
        if (entries == NULL)
                return -ENOMEM;

        for (i = 0; i < op->value; i++) {
                entries[i].entry = op->msix_entries[i].entry;
                entries[i].vector = op->msix_entries[i].vector;
        }

        result = pci_enable_msix(dev, entries, op->value);

        if (result == 0) {
                for (i = 0; i < op->value; i++) {
                        op->msix_entries[i].entry = entries[i].entry;
                        if (entries[i].vector)
                                op->msix_entries[i].vector =
                                        xen_pirq_from_irq(entries[i].vector);
                        if (unlikely(verbose_request))
                                printk(KERN_DEBUG DRV_NAME ": %s: " \
                                        "MSI-X[%d]: %d\n",
                                        pci_name(dev), i,
                                        op->msix_entries[i].vector);
                }
        } else
                pr_warn_ratelimited("%s: error enabling MSI-X for guest %u: err %d!\n",
                                    pci_name(dev), pdev->xdev->otherend_id,
                                    result);
        kfree(entries);

        op->value = result;
        dev_data = pci_get_drvdata(dev);
        if (dev_data)
                dev_data->ack_intr = 0;

        return result > 0 ? 0 : result;
}

static
int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
                           struct pci_dev *dev, struct xen_pci_op *op)
{
        struct xen_pcibk_dev_data *dev_data;

        if (unlikely(verbose_request))
                printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
                       pci_name(dev));
        pci_disable_msix(dev);

        /*
         * SR-IOV devices (which don't have any legacy IRQ) have
         * an undefined IRQ value of zero.
         */
        op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
        if (unlikely(verbose_request))
                printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
                        op->value);

        dev_data = pci_get_drvdata(dev);
        if (dev_data)
                dev_data->ack_intr = 1;
        return 0;
}
#endif

/*
 * The same evtchn is now used both for pcifront conf_read_write requests
 * and for PCIe AER frontend acks. We use a dedicated workqueue to schedule
 * the xen_pcibk conf_read_write service, to avoid conflicting with the
 * aer_core do_recovery job, which also uses the system default workqueue.
 */
void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
{
        /* Check that the frontend is requesting an operation and that we are
         * not already processing a request */
        if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
            && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
                queue_work(xen_pcibk_wq, &pdev->op_work);
        }
        /* _XEN_PCIB_active should have been cleared by pcifront. Also make
         * sure xen_pcibk is waiting for an ack by checking _PCIB_op_pending. */
        if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
            && test_bit(_PCIB_op_pending, &pdev->flags)) {
                wake_up(&xen_pcibk_aer_wait_queue);
        }
}

/* Performing the configuration space reads/writes must not be done in atomic
 * context because some of the pci_* functions can sleep (mostly due to ACPI
 * use of semaphores). This function is intended to be called from a work
 * queue in process context taking a struct xen_pcibk_device as a parameter */
void xen_pcibk_do_op(struct work_struct *data)
{
        struct xen_pcibk_device *pdev =
                container_of(data, struct xen_pcibk_device, op_work);
        struct pci_dev *dev;
        struct xen_pcibk_dev_data *dev_data = NULL;
        struct xen_pci_op *op = &pdev->sh_info->op;
        int test_intx = 0;

        dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

        if (dev == NULL)
                op->err = XEN_PCI_ERR_dev_not_found;
        else {
                dev_data = pci_get_drvdata(dev);
                if (dev_data)
                        test_intx = dev_data->enable_intx;
                switch (op->cmd) {
                case XEN_PCI_OP_conf_read:
                        op->err = xen_pcibk_config_read(dev,
                                  op->offset, op->size, &op->value);
                        break;
                case XEN_PCI_OP_conf_write:
                        op->err = xen_pcibk_config_write(dev,
                                  op->offset, op->size, op->value);
                        break;
#ifdef CONFIG_PCI_MSI
                case XEN_PCI_OP_enable_msi:
                        op->err = xen_pcibk_enable_msi(pdev, dev, op);
                        break;
                case XEN_PCI_OP_disable_msi:
                        op->err = xen_pcibk_disable_msi(pdev, dev, op);
                        break;
                case XEN_PCI_OP_enable_msix:
                        op->err = xen_pcibk_enable_msix(pdev, dev, op);
                        break;
                case XEN_PCI_OP_disable_msix:
                        op->err = xen_pcibk_disable_msix(pdev, dev, op);
                        break;
#endif
                default:
                        op->err = XEN_PCI_ERR_not_implemented;
                        break;
                }
        }
        if (!op->err && dev && dev_data) {
                /* Transition detected */
                if ((dev_data->enable_intx != test_intx))
                        xen_pcibk_control_isr(dev, 0 /* no reset */);
        }
        /* Tell the driver domain that we're done. */
        wmb();
        clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
        notify_remote_via_irq(pdev->evtchn_irq);

        /* Mark that we're done. */
        smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
        clear_bit(_PDEVF_op_active, &pdev->flags);
        smp_mb__after_clear_bit(); /* /before/ final check for work */

        /* Check to see if the driver domain tried to start another request in
         * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
         */
        xen_pcibk_test_and_schedule_op(pdev);
}

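/* Interrupt handler for the event channel shared with the frontend: a
 * notification means there may be a new request (or an AER ack) pending,
 * so let xen_pcibk_test_and_schedule_op() decide what, if anything, to do.
 */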
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
{
        struct xen_pcibk_device *pdev = dev_id;

        xen_pcibk_test_and_schedule_op(pdev);

        return IRQ_HANDLED;
}

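/* Fake handler installed on the device's legacy IRQ while the guest still
 * uses INTx (see xen_pcibk_control_isr). While isr_on and ack_intr are set,
 * claim the interrupt; every 1000 handled interrupts, re-check whether the
 * line is shared with other domains and, if it is not, stop claiming it.
 */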
static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id)
{
        struct pci_dev *dev = (struct pci_dev *)dev_id;
        struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);

        if (dev_data->isr_on && dev_data->ack_intr) {
                dev_data->handled++;
                if ((dev_data->handled % 1000) == 0) {
                        if (xen_test_irq_shared(irq)) {
                                pr_info("%s IRQ line is not shared "
                                        "with other domains. Turning ISR off\n",
                                        dev_data->irq_name);
                                dev_data->ack_intr = 0;
                        }
                }
                return IRQ_HANDLED;
        }
        return IRQ_NONE;
}