/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);

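/*
 * Create IOMMU (VT-d) mappings for a range of guest frames so that an
 * assigned device can DMA to them using guest physical addresses as IOVAs.
 * Frames that are already mapped are skipped; on failure, every page
 * mapped so far is released again via kvm_iommu_put_pages().
 */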
int kvm_iommu_map_pages(struct kvm *kvm,
                        gfn_t base_gfn, unsigned long npages)
{
        gfn_t gfn = base_gfn;
        pfn_t pfn;
        int i, r = 0;
        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        for (i = 0; i < npages; i++) {
                /* check if already mapped */
                if (intel_iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
                        continue;

                pfn = gfn_to_pfn(kvm, gfn);
                r = intel_iommu_map_address(domain,
                                            gfn_to_gpa(gfn),
                                            pfn_to_hpa(pfn),
                                            PAGE_SIZE,
                                            DMA_PTE_READ | DMA_PTE_WRITE);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address: "
                               "iommu failed to map pfn=%lx\n", pfn);
                        goto unmap_pages;
                }
                gfn++;
        }
        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, base_gfn, i);
        return r;
}

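/*
 * Walk every memory slot of the VM and map it into the IOMMU domain,
 * stopping at the first slot that fails to map.
 */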
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int i, r = 0;

        down_read(&kvm->slots_lock);
        for (i = 0; i < kvm->nmemslots; i++) {
                r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn,
                                        kvm->memslots[i].npages);
                if (r)
                        break;
        }
        up_read(&kvm->slots_lock);
        return r;
}

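/*
 * Attach an assigned PCI device to this VM's DMAR domain so that its DMA
 * is translated through the guest physical address space.
 */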
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct pci_dev *pdev = NULL;
        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
        int r;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        r = intel_iommu_attach_device(domain, pdev);
        if (r) {
                printk(KERN_ERR "assign device %x:%x.%x failed\n",
                       pdev->bus->number,
                       PCI_SLOT(pdev->devfn),
                       PCI_FUNC(pdev->devfn));
                return r;
        }

        printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
               assigned_dev->host_busnr,
               PCI_SLOT(assigned_dev->host_devfn),
               PCI_FUNC(assigned_dev->host_devfn));

        return 0;
}

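/*
 * Detach an assigned PCI device from this VM's DMAR domain.
 */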
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
        struct pci_dev *pdev = NULL;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        intel_iommu_detach_device(domain, pdev);

        printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n",
               assigned_dev->host_busnr,
               PCI_SLOT(assigned_dev->host_devfn),
               PCI_FUNC(assigned_dev->host_devfn));

        return 0;
}

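/*
 * Set up VT-d for a guest: allocate a DMAR domain for the VM and map all
 * of its current memory slots into it.
 */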
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!intel_iommu_found()) {
                printk(KERN_ERR "%s: intel iommu not found\n", __func__);
                return -ENODEV;
        }

        kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain();
        if (!kvm->arch.intel_iommu_domain)
                return -ENOMEM;

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                goto out_unmap;

        return 0;

out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}

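/*
 * Drop the page references taken by kvm_iommu_map_pages() and remove the
 * corresponding IOMMU mappings for the given range of guest frames.
 */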
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        gfn_t gfn = base_gfn;
        pfn_t pfn;
        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
        unsigned long i;
        u64 phys;

        /* check if iommu exists and in use */
        if (!domain)
                return;

        for (i = 0; i < npages; i++) {
                phys = intel_iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
                pfn = phys >> PAGE_SHIFT;
                kvm_release_pfn_clean(pfn);
                gfn++;
        }

        intel_iommu_unmap_address(domain,
                                  gfn_to_gpa(base_gfn),
                                  PAGE_SIZE * npages);
}

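/*
 * Tear down the IOMMU mappings of every memory slot.
 */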
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int i;

        down_read(&kvm->slots_lock);
        for (i = 0; i < kvm->nmemslots; i++) {
                kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
                                    kvm->memslots[i].npages);
        }
        up_read(&kvm->slots_lock);

        return 0;
}

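/*
 * Undo kvm_iommu_map_guest(): detach all assigned devices from the DMAR
 * domain, unmap every memory slot, and free the domain itself.
 */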
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct kvm_assigned_dev_kernel *entry;
        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        list_for_each_entry(entry, &kvm->arch.assigned_dev_head, list) {
                printk(KERN_DEBUG "VT-d unmap: host bdf = %x:%x:%x\n",
                       entry->host_busnr,
                       PCI_SLOT(entry->host_devfn),
                       PCI_FUNC(entry->host_devfn));

                /* detach kvm dmar domain */
                intel_iommu_detach_device(domain, entry->dev);
        }

        kvm_iommu_unmap_memslots(kvm);
        intel_iommu_free_domain(domain);
        return 0;
}