/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include "irq.h"

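/*
 * Look up an assigned device by the id that user space registered it
 * under; returns NULL if no entry on the list matches.
 */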
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
                                                             int assigned_dev_id)
{
        struct list_head *ptr;
        struct kvm_assigned_dev_kernel *match;

        list_for_each(ptr, head) {
                match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
                if (match->assigned_dev_id == assigned_dev_id)
                        return match;
        }
        return NULL;
}

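/*
 * Translate a host IRQ number back to its slot in the device's MSI-X
 * entry table, so the interrupt handler knows which guest vector is
 * pending.  Returns -1 if no entry matches.
 */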
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
                                    *assigned_dev, int irq)
{
        int i, index;
        struct msix_entry *host_msix_entries;

        host_msix_entries = assigned_dev->host_msix_entries;

        index = -1;
        for (i = 0; i < assigned_dev->entries_nr; i++)
                if (irq == host_msix_entries[i].vector) {
                        index = i;
                        break;
                }
        /*
         * Return -1 (not 0) on failure: the caller treats a negative
         * index as an error, and returning 0 here would wrongly mark
         * MSI-X entry 0 as pending.
         */
        if (index < 0)
                printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");

        return index;
}

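/*
 * Deferred-injection work item: runs in process context and forwards
 * any pending host interrupts to the guest via kvm_set_irq().
 */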
static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
        struct kvm_assigned_dev_kernel *assigned_dev;
        int i;

        assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
                                    interrupt_work);

        spin_lock_irq(&assigned_dev->assigned_dev_lock);
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                struct kvm_guest_msix_entry *guest_entries =
                        assigned_dev->guest_msix_entries;

                for (i = 0; i < assigned_dev->entries_nr; i++) {
                        if (!(guest_entries[i].flags &
                              KVM_ASSIGNED_MSIX_PENDING))
                                continue;
                        guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
                        kvm_set_irq(assigned_dev->kvm,
                                    assigned_dev->irq_source_id,
                                    guest_entries[i].vector, 1);
                }
        } else
                kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
                            assigned_dev->guest_irq, 1);
        spin_unlock_irq(&assigned_dev->assigned_dev_lock);
}

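/*
 * Host interrupt handler for the assigned device.  It only records what
 * is pending and kicks the work item above; for guest INTx it also
 * masks the host line until the guest acks, since the line stays
 * asserted until then.
 */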
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
        unsigned long flags;
        struct kvm_assigned_dev_kernel *assigned_dev =
                (struct kvm_assigned_dev_kernel *) dev_id;

        spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                int index = find_index_from_host_irq(assigned_dev, irq);
                if (index < 0)
                        goto out;
                assigned_dev->guest_msix_entries[index].flags |=
                        KVM_ASSIGNED_MSIX_PENDING;
        }

        schedule_work(&assigned_dev->interrupt_work);

        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
                disable_irq_nosync(irq);
                assigned_dev->host_irq_disabled = true;
        }

out:
        spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
        return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_assigned_dev_kernel *dev;
        unsigned long flags;

        if (kian->gsi == -1)
                return;

        dev = container_of(kian, struct kvm_assigned_dev_kernel,
                           ack_notifier);

        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

        /* The guest irq may be shared so this ack may be
         * from another device.
         */
        spin_lock_irqsave(&dev->assigned_dev_lock, flags);
        if (dev->host_irq_disabled) {
                enable_irq(dev->host_irq);
                dev->host_irq_disabled = false;
        }
        spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}

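/*
 * Undo the guest-side half of an IRQ assignment: drop the ack notifier
 * and release the irq source id.
 */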
static void deassign_guest_irq(struct kvm *kvm,
                               struct kvm_assigned_dev_kernel *assigned_dev)
{
        kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
        assigned_dev->ack_notifier.gsi = -1;

        if (assigned_dev->irq_source_id != -1)
                kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
        assigned_dev->irq_source_id = -1;
        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* The function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
                              struct kvm_assigned_dev_kernel *assigned_dev)
{
        /*
         * In kvm_free_device_irq, cancel_work_sync() returns true if:
         * 1. work is scheduled, and then cancelled.
         * 2. work callback is executed.
         *
         * The first case ensures that the irq is disabled and no more
         * events will happen.  But in the second case the irq may still
         * be enabled (e.g. for MSI), so we disable the irq here to
         * prevent further events.
         *
         * Note that this may result in a nested disable if the interrupt
         * type is INTx, but that is fine since we are about to free it.
         *
         * If this function is called as part of VM destruction, make sure
         * the kvm state is still valid up to this point, since we may
         * also have to wait for interrupt_work to complete.
         */
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                int i;
                for (i = 0; i < assigned_dev->entries_nr; i++)
                        disable_irq_nosync(assigned_dev->host_msix_entries[i].vector);

                cancel_work_sync(&assigned_dev->interrupt_work);

                for (i = 0; i < assigned_dev->entries_nr; i++)
                        free_irq(assigned_dev->host_msix_entries[i].vector,
                                 (void *)assigned_dev);

                assigned_dev->entries_nr = 0;
                kfree(assigned_dev->host_msix_entries);
                kfree(assigned_dev->guest_msix_entries);
                pci_disable_msix(assigned_dev->dev);
        } else {
                /* Deal with MSI and INTx */
                disable_irq_nosync(assigned_dev->host_irq);
                cancel_work_sync(&assigned_dev->interrupt_work);

                free_irq(assigned_dev->host_irq, (void *)assigned_dev);

                if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
                        pci_disable_msi(assigned_dev->dev);
        }

        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

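/*
 * Tear down the host and/or guest halves of an IRQ assignment,
 * depending on which type bits the caller passes in.
 */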
static int kvm_deassign_irq(struct kvm *kvm,
                            struct kvm_assigned_dev_kernel *assigned_dev,
                            unsigned long irq_requested_type)
{
        unsigned long guest_irq_type, host_irq_type;

        if (!irqchip_in_kernel(kvm))
                return -EINVAL;
        /* no irq assignment to deassign */
        if (!assigned_dev->irq_requested_type)
                return -ENXIO;

        host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
        guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

        if (host_irq_type)
                deassign_host_irq(kvm, assigned_dev);
        if (guest_irq_type)
                deassign_guest_irq(kvm, assigned_dev);

        return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
                                  struct kvm_assigned_dev_kernel *assigned_dev)
{
        kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
                                     struct kvm_assigned_dev_kernel
                                     *assigned_dev)
{
        kvm_free_assigned_irq(kvm, assigned_dev);

        pci_reset_function(assigned_dev->dev);

        pci_release_regions(assigned_dev->dev);
        pci_disable_device(assigned_dev->dev);
        pci_dev_put(assigned_dev->dev);

        list_del(&assigned_dev->list);
        kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
        struct list_head *ptr, *ptr2;
        struct kvm_assigned_dev_kernel *assigned_dev;

        list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
                assigned_dev = list_entry(ptr,
                                          struct kvm_assigned_dev_kernel,
                                          list);

                kvm_free_assigned_device(kvm, assigned_dev);
        }
}

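/*
 * Host-side IRQ setup follows, one helper per interrupt type
 * (INTx, MSI, MSI-X).
 */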
static int assigned_device_enable_host_intx(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
{
        dev->host_irq = dev->dev->irq;
        /* Even though this is PCI, we don't want to use shared
         * interrupts. Sharing host devices with guest-assigned devices
         * on the same interrupt line is not a happy situation: there
         * are going to be long delays in accepting, acking, etc.
         */
        if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
                        0, "kvm_assigned_intx_device", (void *)dev))
                return -EIO;
        return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
                                           struct kvm_assigned_dev_kernel *dev)
{
        int r;

        if (!dev->dev->msi_enabled) {
                r = pci_enable_msi(dev->dev);
                if (r)
                        return r;
        }

        dev->host_irq = dev->dev->irq;
        if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
                        "kvm_assigned_msi_device", (void *)dev)) {
                pci_disable_msi(dev->dev);
                return -EIO;
        }

        return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
{
        int i, r = -EINVAL;

        /* host_msix_entries and guest_msix_entries should have been
         * initialized */
        if (dev->entries_nr == 0)
                return r;

        r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
        if (r)
                return r;

        for (i = 0; i < dev->entries_nr; i++) {
                r = request_irq(dev->host_msix_entries[i].vector,
                                kvm_assigned_dev_intr, 0,
                                "kvm_assigned_msix_device",
                                (void *)dev);
                if (r)
                        goto err;
        }

        return 0;
err:
        /* unwind: free the vectors requested so far, then disable MSI-X */
        for (i -= 1; i >= 0; i--)
                free_irq(dev->host_msix_entries[i].vector, (void *)dev);
        pci_disable_msix(dev->dev);
        return r;
}
#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
                                struct kvm_assigned_dev_kernel *dev,
                                struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = irq->guest_irq;
        return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
                                struct kvm_assigned_dev_kernel *dev,
                                struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = -1;
        dev->host_irq_disabled = false;
        return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
                                struct kvm_assigned_dev_kernel *dev,
                                struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = -1;
        dev->host_irq_disabled = false;
        return 0;
}
#endif

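/*
 * Dispatch to the host-side setup helper that matches the requested
 * interrupt type.  Fails with -EEXIST if a host IRQ is already set up.
 */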
static int assign_host_irq(struct kvm *kvm,
                           struct kvm_assigned_dev_kernel *dev,
                           __u32 host_irq_type)
{
        int r = -EEXIST;

        if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
                return r;

        switch (host_irq_type) {
        case KVM_DEV_IRQ_HOST_INTX:
                r = assigned_device_enable_host_intx(kvm, dev);
                break;
#ifdef __KVM_HAVE_MSI
        case KVM_DEV_IRQ_HOST_MSI:
                r = assigned_device_enable_host_msi(kvm, dev);
                break;
#endif
#ifdef __KVM_HAVE_MSIX
        case KVM_DEV_IRQ_HOST_MSIX:
                r = assigned_device_enable_host_msix(kvm, dev);
                break;
#endif
        default:
                r = -EINVAL;
        }

        if (!r)
                dev->irq_requested_type |= host_irq_type;

        return r;
}

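/*
 * Wire up the guest side of the assignment: allocate an irq source id,
 * record the guest irq, and register the ack notifier on success.
 */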
static int assign_guest_irq(struct kvm *kvm,
                            struct kvm_assigned_dev_kernel *dev,
                            struct kvm_assigned_irq *irq,
                            unsigned long guest_irq_type)
{
        int id;
        int r = -EEXIST;

        if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
                return r;

        id = kvm_request_irq_source_id(kvm);
        if (id < 0)
                return id;

        dev->irq_source_id = id;

        switch (guest_irq_type) {
        case KVM_DEV_IRQ_GUEST_INTX:
                r = assigned_device_enable_guest_intx(kvm, dev, irq);
                break;
#ifdef __KVM_HAVE_MSI
        case KVM_DEV_IRQ_GUEST_MSI:
                r = assigned_device_enable_guest_msi(kvm, dev, irq);
                break;
#endif
#ifdef __KVM_HAVE_MSIX
        case KVM_DEV_IRQ_GUEST_MSIX:
                r = assigned_device_enable_guest_msix(kvm, dev, irq);
                break;
#endif
        default:
                r = -EINVAL;
        }

        if (!r) {
                dev->irq_requested_type |= guest_irq_type;
                kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
        } else
                kvm_free_irq_source_id(kvm, dev->irq_source_id);

        return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
                                   struct kvm_assigned_irq *assigned_irq)
{
        int r = -EINVAL;
        struct kvm_assigned_dev_kernel *match;
        unsigned long host_irq_type, guest_irq_type;

        if (!irqchip_in_kernel(kvm))
                return r;

        mutex_lock(&kvm->lock);
        r = -ENODEV;
        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match)
                goto out;

        host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
        guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

        r = -EINVAL;
        /* can only assign one type at a time */
        if (hweight_long(host_irq_type) > 1)
                goto out;
        if (hweight_long(guest_irq_type) > 1)
                goto out;
        if (host_irq_type == 0 && guest_irq_type == 0)
                goto out;

        r = 0;
        if (host_irq_type)
                r = assign_host_irq(kvm, match, host_irq_type);
        if (r)
                goto out;

        if (guest_irq_type)
                r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
        mutex_unlock(&kvm->lock);
        return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
                                         struct kvm_assigned_irq
                                         *assigned_irq)
{
        int r = -ENODEV;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match)
                goto out;

        r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
        mutex_unlock(&kvm->lock);
        return r;
}

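/*
 * KVM_ASSIGN_PCI_DEVICE: claim the host PCI device, reset it, track it
 * on the VM's assigned-device list, and optionally map it through the
 * IOMMU.
 */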
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;

        mutex_lock(&kvm->lock);
        idx = srcu_read_lock(&kvm->srcu);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (match) {
                /* device already assigned */
                r = -EEXIST;
                goto out;
        }

        match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
        if (match == NULL) {
                printk(KERN_INFO "%s: Couldn't allocate memory\n",
                       __func__);
                r = -ENOMEM;
                goto out;
        }
        dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
                                          assigned_dev->busnr,
                                          assigned_dev->devfn);
        if (!dev) {
                printk(KERN_INFO "%s: host device not found\n", __func__);
                r = -EINVAL;
                goto out_free;
        }
        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
                goto out_put;
        }
        r = pci_request_regions(dev, "kvm_assigned_device");
        if (r) {
                printk(KERN_INFO "%s: Could not get access to device regions\n",
                       __func__);
                goto out_disable;
        }

        pci_reset_function(dev);

        match->assigned_dev_id = assigned_dev->assigned_dev_id;
        match->host_segnr = assigned_dev->segnr;
        match->host_busnr = assigned_dev->busnr;
        match->host_devfn = assigned_dev->devfn;
        match->flags = assigned_dev->flags;
        match->dev = dev;
        spin_lock_init(&match->assigned_dev_lock);
        match->irq_source_id = -1;
        match->kvm = kvm;
        match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
        INIT_WORK(&match->interrupt_work,
                  kvm_assigned_dev_interrupt_work_handler);

        list_add(&match->list, &kvm->arch.assigned_dev_head);

        if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
                if (!kvm->arch.iommu_domain) {
                        r = kvm_iommu_map_guest(kvm);
                        if (r)
                                goto out_list_del;
                }
                r = kvm_assign_device(kvm, match);
                if (r)
                        goto out_list_del;
        }

out:
        srcu_read_unlock(&kvm->srcu, idx);
        mutex_unlock(&kvm->lock);
        return r;
out_list_del:
        list_del(&match->list);
        pci_release_regions(dev);
out_disable:
        pci_disable_device(dev);
out_put:
        pci_dev_put(dev);
out_free:
        kfree(match);
        srcu_read_unlock(&kvm->srcu, idx);
        mutex_unlock(&kvm->lock);
        return r;
}

static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
                                        struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (!match) {
                printk(KERN_INFO "%s: device hasn't been assigned before, "
                       "so cannot be deassigned\n", __func__);
                r = -EINVAL;
                goto out;
        }

        if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
                kvm_deassign_device(kvm, match);

        kvm_free_assigned_device(kvm, match);

out:
        mutex_unlock(&kvm->lock);
        return r;
}

#ifdef __KVM_HAVE_MSIX
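/*
 * KVM_ASSIGN_SET_MSIX_NR: size the host and guest MSI-X entry tables.
 * This may only be done once per device.
 */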
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
                                    struct kvm_assigned_msix_nr *entry_nr)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *adev;

        mutex_lock(&kvm->lock);

        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                     entry_nr->assigned_dev_id);
        if (!adev) {
                r = -EINVAL;
                goto msix_nr_out;
        }

        if (adev->entries_nr == 0) {
                adev->entries_nr = entry_nr->entry_nr;
                if (adev->entries_nr == 0 ||
                    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
                        r = -EINVAL;
                        goto msix_nr_out;
                }

                adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
                                                  entry_nr->entry_nr,
                                                  GFP_KERNEL);
                if (!adev->host_msix_entries) {
                        r = -ENOMEM;
                        goto msix_nr_out;
                }
                adev->guest_msix_entries = kzalloc(
                                sizeof(struct kvm_guest_msix_entry) *
                                entry_nr->entry_nr, GFP_KERNEL);
                if (!adev->guest_msix_entries) {
                        kfree(adev->host_msix_entries);
                        r = -ENOMEM;
                        goto msix_nr_out;
                }
        } else /* setting the number of MSI-X entries twice is not allowed */
                r = -EINVAL;
msix_nr_out:
        mutex_unlock(&kvm->lock);
        return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
                                       struct kvm_assigned_msix_entry *entry)
{
        int r = 0, i;
        struct kvm_assigned_dev_kernel *adev;

        mutex_lock(&kvm->lock);

        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                     entry->assigned_dev_id);
        if (!adev) {
                r = -EINVAL;
                goto msix_entry_out;
        }

        for (i = 0; i < adev->entries_nr; i++)
                if (adev->guest_msix_entries[i].vector == 0 ||
                    adev->guest_msix_entries[i].entry == entry->entry) {
                        adev->guest_msix_entries[i].entry = entry->entry;
                        adev->guest_msix_entries[i].vector = entry->gsi;
                        adev->host_msix_entries[i].entry = entry->entry;
                        break;
                }
        if (i == adev->entries_nr) {
                r = -ENOSPC;
                goto msix_entry_out;
        }

msix_entry_out:
        mutex_unlock(&kvm->lock);
        return r;
}
#endif

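/*
 * Top-level ioctl dispatcher for device assignment.  Copies each
 * argument struct in from user space and hands it to the matching
 * helper above.
 */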
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        int r = -ENOTTY;

        switch (ioctl) {
        case KVM_ASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_IRQ: {
                r = -EOPNOTSUPP;
                break;
        }
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
        case KVM_ASSIGN_DEV_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
        case KVM_DEASSIGN_DEV_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
        case KVM_DEASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
#endif
#ifdef KVM_CAP_IRQ_ROUTING
        case KVM_SET_GSI_ROUTING: {
                struct kvm_irq_routing routing;
                struct kvm_irq_routing __user *urouting;
                struct kvm_irq_routing_entry *entries;

                r = -EFAULT;
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
                if (routing.nr >= KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
                        goto out;
                r = -ENOMEM;
                entries = vmalloc(routing.nr * sizeof(*entries));
                if (!entries)
                        goto out;
                r = -EFAULT;
                urouting = argp;
                if (copy_from_user(entries, urouting->entries,
                                   routing.nr * sizeof(*entries)))
                        goto out_free_irq_routing;
                r = kvm_set_irq_routing(kvm, entries, routing.nr,
                                        routing.flags);
out_free_irq_routing:
                vfree(entries);
                break;
        }
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
        case KVM_ASSIGN_SET_MSIX_NR: {
                struct kvm_assigned_msix_nr entry_nr;
                r = -EFAULT;
                if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
                        goto out;
                r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_SET_MSIX_ENTRY: {
                struct kvm_assigned_msix_entry entry;
                r = -EFAULT;
                if (copy_from_user(&entry, argp, sizeof entry))
                        goto out;
                r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
                if (r)
                        goto out;
                break;
        }
#endif
        }
out:
        return r;
}