iommu.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */
#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);
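
/*
 * Pin the host pages backing [gfn, gfn + size) in @slot and return the
 * pfn of the first page.  The pages must stay resident for as long as
 * the IOMMU mapping exists; they are released again, page by page, by
 * kvm_unpin_pages() when the range is unmapped.
 */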
static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
			   gfn_t gfn, unsigned long size)
{
	gfn_t end_gfn;
	pfn_t pfn;

	/* Pin the first page and bail out early if that already fails. */
	pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
	end_gfn = gfn + (size >> PAGE_SHIFT);
	gfn    += 1;

	if (is_error_pfn(pfn))
		return pfn;

	/*
	 * Pin the remaining pages; only the reference taken by the call
	 * matters here, so the returned pfns are deliberately discarded.
	 */
	while (gfn < end_gfn)
		gfn_to_pfn_memslot(kvm, slot, gfn++);

	return pfn;
}
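
/*
 * Map the guest memory described by @slot into the VM's IOMMU domain,
 * using the largest page size that the backing host memory, the slot
 * boundary, and the gfn alignment allow.  On failure, everything mapped
 * so far is torn down again via kvm_iommu_put_pages().
 */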
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ | IOMMU_WRITE;
	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4kb steps later.
		 */
		pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
		if (is_error_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      get_order(page_size), flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_pages: "
			       "iommu failed to map pfn=%lx\n", pfn);
			goto unmap_pages;
		}
		gfn += page_size >> PAGE_SHIFT;
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
	return r;
}
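
/* Walk every memslot and establish IOMMU mappings for each of them. */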
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int i, r = 0;
	struct kvm_memslots *slots;

	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
		if (r)
			break;
	}

	return r;
}
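
/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If attaching
 * the device changes the domain's cache-coherency capability, the
 * memslot mappings are rebuilt so the IOMMU_CACHE flag stays accurate.
 */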
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct pci_dev *pdev = NULL;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int r, last_flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	r = iommu_attach_device(domain, &pdev->dev);
	if (r) {
		printk(KERN_ERR "assign device %x:%x:%x.%x failed\n",
			pci_domain_nr(pdev->bus),
			pdev->bus->number,
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn));
		return r;
	}

	last_flags = kvm->arch.iommu_flags;
	if (iommu_domain_has_cap(kvm->arch.iommu_domain,
				 IOMMU_CAP_CACHE_COHERENCY))
		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

	/* Check if need to update IOMMU page table for guest memory */
	if ((last_flags ^ kvm->arch.iommu_flags) ==
			KVM_IOMMU_CACHE_COHERENCY) {
		kvm_iommu_unmap_memslots(kvm);
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
		assigned_dev->host_segnr,
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	return 0;
out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}
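
/* Detach an assigned PCI device from the VM's IOMMU domain again. */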
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	struct pci_dev *pdev = NULL;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);

	printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
		assigned_dev->host_segnr,
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	return 0;
}
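
/*
 * Allocate the VM's IOMMU domain and map all existing memslots into it.
 * In this era of the code this is typically invoked when the first
 * device is assigned to the guest.
 */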
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_found()) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	kvm->arch.iommu_domain = iommu_domain_alloc();
	if (!kvm->arch.iommu_domain)
		return -ENOMEM;

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		goto out_unmap;

	return 0;

out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}
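
/* Drop the page references taken by kvm_pin_pages(). */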
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}
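
/*
 * Unmap [base_gfn, base_gfn + npages) from the IOMMU domain and unpin
 * the backing host pages.  Unmapping proceeds one 4K page at a time;
 * iommu_unmap() reports (as an order) how much was actually unmapped,
 * and that many pages are unpinned before advancing.
 */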
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain;
	gfn_t end_gfn, gfn;
	pfn_t pfn;
	u64 phys;

	domain  = kvm->arch.iommu_domain;
	end_gfn = base_gfn + npages;
	gfn     = base_gfn;

	/* check if iommu exists and in use */
	if (!domain)
		return;

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		int order;

		/* Get physical address */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
		pfn  = phys >> PAGE_SHIFT;

		/*
		 * Unmap address from IO address space.  Order 0 requests a
		 * single 4K page; the IOMMU may unmap a larger page there
		 * and returns the order of what it actually unmapped.
		 */
		order       = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
		unmap_pages = 1ULL << order;

		/* Unpin all pages we just unmapped to not leak any memory */
		kvm_unpin_pages(kvm, pfn, unmap_pages);

		gfn += unmap_pages;
	}
}
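
/* Tear down the IOMMU mappings of every memslot. */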
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
				    slots->memslots[i].npages);
	}

	return 0;
}
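
/*
 * Undo kvm_iommu_map_guest(): unmap all memslots and free the IOMMU
 * domain, e.g. when the VM is destroyed or the last assigned device
 * is removed.
 */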
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	kvm_iommu_unmap_memslots(kvm);
	iommu_domain_free(domain);
	return 0;
}