iommu.c

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */
#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
                   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages);
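
/*
 * Pin the host pages backing @size bytes of guest memory starting at
 * @gfn.  Every gfn_to_pfn_memslot() call takes a reference on the
 * backing page; the first page is pinned before the loop, hence the
 * early gfn increment.  Returns the pfn of the first page, or an
 * error pfn if the gfn cannot be resolved.
 */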
static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
                           unsigned long size)
{
        gfn_t end_gfn;
        pfn_t pfn;

        pfn     = gfn_to_pfn_memslot(slot, gfn);
        end_gfn = gfn + (size >> PAGE_SHIFT);
        gfn    += 1;

        if (is_error_noslot_pfn(pfn))
                return pfn;

        while (gfn < end_gfn)
                gfn_to_pfn_memslot(slot, gfn++);

        return pfn;
}
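
/*
 * Map all pages of @slot into the VM's IOMMU domain, using the largest
 * host page size that fits within the memslot and the gfn alignment.
 * The backing pages are pinned before being mapped; on failure the
 * portion already mapped is torn down via kvm_iommu_put_pages().
 */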
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        gfn_t gfn, end_gfn;
        pfn_t pfn;
        int r = 0;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int flags;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        gfn     = slot->base_gfn;
        end_gfn = gfn + slot->npages;

        flags = IOMMU_READ;
        if (!(slot->flags & KVM_MEM_READONLY))
                flags |= IOMMU_WRITE;
        if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
                flags |= IOMMU_CACHE;

        while (gfn < end_gfn) {
                unsigned long page_size;

                /* Check if already mapped */
                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
                        gfn += 1;
                        continue;
                }

                /* Get the page size we could use to map */
                page_size = kvm_host_page_size(kvm, gfn);

                /* Make sure the page_size does not exceed the memslot */
                while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
                        page_size >>= 1;

                /* Make sure gfn is aligned to the page size we want to map */
                while ((gfn << PAGE_SHIFT) & (page_size - 1))
                        page_size >>= 1;

                /*
                 * Pin all pages we are about to map in memory. This is
                 * important because we unmap and unpin in 4kb steps later.
                 */
                pfn = kvm_pin_pages(slot, gfn, page_size);
                if (is_error_noslot_pfn(pfn)) {
                        gfn += 1;
                        continue;
                }

                /* Map into IO address space */
                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                              page_size, flags);
                if (r) {
                        printk(KERN_ERR "kvm_iommu_map_address: "
                               "iommu failed to map pfn=%llx\n", pfn);
                        goto unmap_pages;
                }

                gfn += page_size >> PAGE_SHIFT;
        }

        return 0;

unmap_pages:
        kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
        return r;
}
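
/*
 * Map every current memslot, walking the memslot array under SRCU.
 * Stops at, and returns, the first mapping error.
 */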
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
        int idx, r = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots) {
                r = kvm_iommu_map_pages(kvm, memslot);
                if (r)
                        break;
        }
        srcu_read_unlock(&kvm->srcu, idx);

        return r;
}
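
/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If the
 * domain supports cache-coherent DMA and the mappings were not yet
 * built with IOMMU_CACHE, all memslots are remapped so that guest
 * memory gets the coherency attribute.
 */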
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct pci_dev *pdev = NULL;
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        int r, last_flags;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        r = iommu_attach_device(domain, &pdev->dev);
        if (r) {
                dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
                return r;
        }

        last_flags = kvm->arch.iommu_flags;
        if (iommu_domain_has_cap(kvm->arch.iommu_domain,
                                 IOMMU_CAP_CACHE_COHERENCY))
                kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

        /* Check if need to update IOMMU page table for guest memory */
        if ((last_flags ^ kvm->arch.iommu_flags) ==
                        KVM_IOMMU_CACHE_COHERENCY) {
                kvm_iommu_unmap_memslots(kvm);
                r = kvm_iommu_map_memslots(kvm);
                if (r)
                        goto out_unmap;
        }

        pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;

        printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
                assigned_dev->host_segnr,
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
out_unmap:
        kvm_iommu_unmap_memslots(kvm);
        return r;
}
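
/*
 * Detach an assigned PCI device from the VM's IOMMU domain and clear
 * its PCI_DEV_FLAGS_ASSIGNED flag.  The domain's mappings are left in
 * place for any devices that remain assigned.
 */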
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;
        struct pci_dev *pdev = NULL;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        pdev = assigned_dev->dev;
        if (pdev == NULL)
                return -ENODEV;

        iommu_detach_device(domain, &pdev->dev);

        pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;

        printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
                assigned_dev->host_segnr,
                assigned_dev->host_busnr,
                PCI_SLOT(assigned_dev->host_devfn),
                PCI_FUNC(assigned_dev->host_devfn));

        return 0;
}
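
/*
 * Set up IOMMU protection for the guest: allocate the VM's IOMMU
 * domain and map all existing memslots into it.  Unless the
 * allow_unsafe_assigned_interrupts module parameter is set, this is
 * refused on platforms without interrupt remapping, where an assigned
 * device could be used to trigger arbitrary interrupts on the host.
 */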
int kvm_iommu_map_guest(struct kvm *kvm)
{
        int r;

        if (!iommu_present(&pci_bus_type)) {
                printk(KERN_ERR "%s: iommu not found\n", __func__);
                return -ENODEV;
        }

        mutex_lock(&kvm->slots_lock);

        kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
        if (!kvm->arch.iommu_domain) {
                r = -ENOMEM;
                goto out_unlock;
        }

        if (!allow_unsafe_assigned_interrupts &&
            !iommu_domain_has_cap(kvm->arch.iommu_domain,
                                  IOMMU_CAP_INTR_REMAP)) {
                printk(KERN_WARNING "%s: No interrupt remapping support,"
                       " disallowing device assignment."
                       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
                       " module option.\n", __func__);
                iommu_domain_free(kvm->arch.iommu_domain);
                kvm->arch.iommu_domain = NULL;
                r = -EPERM;
                goto out_unlock;
        }

        r = kvm_iommu_map_memslots(kvm);
        if (r)
                kvm_iommu_unmap_memslots(kvm);

out_unlock:
        mutex_unlock(&kvm->slots_lock);
        return r;
}
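
/* Drop the page references taken by kvm_pin_pages() for @npages pages. */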
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; ++i)
                kvm_release_pfn_clean(pfn + i);
}
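
/*
 * Unmap @npages pages starting at @base_gfn from the IOMMU domain and
 * unpin the backing host pages.  iommu_unmap() may tear down a mapping
 * larger than the single page asked for; the size it returns tells us
 * how many pages were actually unmapped and therefore must be unpinned.
 */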
static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
{
        struct iommu_domain *domain;
        gfn_t end_gfn, gfn;
        pfn_t pfn;
        u64 phys;

        domain  = kvm->arch.iommu_domain;
        end_gfn = base_gfn + npages;
        gfn     = base_gfn;

        /* check if iommu exists and in use */
        if (!domain)
                return;

        while (gfn < end_gfn) {
                unsigned long unmap_pages;
                size_t size;

                /* Get physical address */
                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

                if (!phys) {
                        gfn++;
                        continue;
                }

                pfn = phys >> PAGE_SHIFT;

                /* Unmap address from IO address space */
                size        = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
                unmap_pages = 1ULL << get_order(size);

                /* Unpin all pages we just unmapped to not leak any memory */
                kvm_unpin_pages(kvm, pfn, unmap_pages);

                gfn += unmap_pages;
        }
}
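
/* Unmap and unpin every page of a single memslot. */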
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}
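
/* Unmap every current memslot, walking the memslot array under SRCU. */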
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
        int idx;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots)
                kvm_iommu_unmap_pages(kvm, memslot);

        srcu_read_unlock(&kvm->srcu, idx);

        return 0;
}
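
/*
 * Tear down IOMMU protection for the guest: unmap all memslots, clear
 * the domain pointer under slots_lock, then free the IOMMU domain.
 */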
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        struct iommu_domain *domain = kvm->arch.iommu_domain;

        /* check if iommu exists and in use */
        if (!domain)
                return 0;

        mutex_lock(&kvm->slots_lock);
        kvm_iommu_unmap_memslots(kvm);
        kvm->arch.iommu_domain = NULL;
        mutex_unlock(&kvm->slots_lock);

        iommu_domain_free(domain);
        return 0;
}