/* iommu.c - generic IOMMU API core (bus ops registration, domain lifecycle, map/unmap) */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
/*
 * Per-bus-type IOMMU initialization hook, run once when a driver
 * registers its ops via bus_set_iommu().  Currently a no-op
 * placeholder; bus-specific setup can be added here later.
 */
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
}
  30. /**
  31. * bus_set_iommu - set iommu-callbacks for the bus
  32. * @bus: bus.
  33. * @ops: the callbacks provided by the iommu-driver
  34. *
  35. * This function is called by an iommu driver to set the iommu methods
  36. * used for a particular bus. Drivers for devices on that bus can use
  37. * the iommu-api after these ops are registered.
  38. * This special function is needed because IOMMUs are usually devices on
  39. * the bus itself, so the iommu drivers are not initialized when the bus
  40. * is set up. With this function the iommu-driver can set the iommu-ops
  41. * afterwards.
  42. */
  43. int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
  44. {
  45. if (bus->iommu_ops != NULL)
  46. return -EBUSY;
  47. bus->iommu_ops = ops;
  48. /* Do IOMMU specific setup for this bus-type */
  49. iommu_bus_init(bus, ops);
  50. return 0;
  51. }
  52. EXPORT_SYMBOL_GPL(bus_set_iommu);
  53. bool iommu_present(struct bus_type *bus)
  54. {
  55. return bus->iommu_ops != NULL;
  56. }
  57. EXPORT_SYMBOL_GPL(iommu_present);
  58. /**
  59. * iommu_set_fault_handler() - set a fault handler for an iommu domain
  60. * @domain: iommu domain
  61. * @handler: fault handler
  62. *
  63. * This function should be used by IOMMU users which want to be notified
  64. * whenever an IOMMU fault happens.
  65. *
  66. * The fault handler itself should return 0 on success, and an appropriate
  67. * error code otherwise.
  68. */
  69. void iommu_set_fault_handler(struct iommu_domain *domain,
  70. iommu_fault_handler_t handler)
  71. {
  72. BUG_ON(!domain);
  73. domain->handler = handler;
  74. }
  75. EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
  76. struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
  77. {
  78. struct iommu_domain *domain;
  79. int ret;
  80. if (bus == NULL || bus->iommu_ops == NULL)
  81. return NULL;
  82. domain = kmalloc(sizeof(*domain), GFP_KERNEL);
  83. if (!domain)
  84. return NULL;
  85. domain->ops = bus->iommu_ops;
  86. ret = domain->ops->domain_init(domain);
  87. if (ret)
  88. goto out_free;
  89. return domain;
  90. out_free:
  91. kfree(domain);
  92. return NULL;
  93. }
  94. EXPORT_SYMBOL_GPL(iommu_domain_alloc);
  95. void iommu_domain_free(struct iommu_domain *domain)
  96. {
  97. if (likely(domain->ops->domain_destroy != NULL))
  98. domain->ops->domain_destroy(domain);
  99. kfree(domain);
  100. }
  101. EXPORT_SYMBOL_GPL(iommu_domain_free);
  102. int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
  103. {
  104. if (unlikely(domain->ops->attach_dev == NULL))
  105. return -ENODEV;
  106. return domain->ops->attach_dev(domain, dev);
  107. }
  108. EXPORT_SYMBOL_GPL(iommu_attach_device);
  109. void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
  110. {
  111. if (unlikely(domain->ops->detach_dev == NULL))
  112. return;
  113. domain->ops->detach_dev(domain, dev);
  114. }
  115. EXPORT_SYMBOL_GPL(iommu_detach_device);
  116. phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
  117. unsigned long iova)
  118. {
  119. if (unlikely(domain->ops->iova_to_phys == NULL))
  120. return 0;
  121. return domain->ops->iova_to_phys(domain, iova);
  122. }
  123. EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
  124. int iommu_domain_has_cap(struct iommu_domain *domain,
  125. unsigned long cap)
  126. {
  127. if (unlikely(domain->ops->domain_has_cap == NULL))
  128. return 0;
  129. return domain->ops->domain_has_cap(domain, cap);
  130. }
  131. EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
/**
 * iommu_map - map a physically contiguous region into an iommu domain
 * @domain: iommu domain
 * @iova: io virtual address to map at
 * @paddr: physical address to map
 * @size: size of the region, in bytes
 * @prot: protection flags passed through to the driver's map callback
 *
 * Splits the region into the largest hardware-supported page sizes
 * (taken from the driver's pgsize_bitmap) compatible with the
 * alignment of @iova/@paddr, and maps each chunk via the driver's
 * map callback.  @iova, @paddr and @size must all be aligned to the
 * smallest supported page size.
 *
 * Returns 0 on success or a negative errno; on driver failure any
 * partially established mapping is torn down again before returning.
 */
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
		 (unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/*
		 * make sure we're still sane: the up-front alignment
		 * check against min_pagesz guarantees at least the
		 * smallest supported size survives the mask
		 */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
			 (unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
/**
 * iommu_unmap - unmap a region from an iommu domain
 * @domain: iommu domain
 * @iova: io virtual address the region starts at
 * @size: size of the region, in bytes
 *
 * Repeatedly calls the driver's unmap callback, which reports how
 * many bytes it actually unmapped each time (drivers may unmap in
 * larger chunks than requested), until @size bytes are unmapped or
 * an unmapped area is hit.  @iova and @size must be aligned to the
 * smallest supported page size.
 *
 * Returns the number of bytes actually unmapped.  NOTE(review): on a
 * missing callback or bad alignment this returns a negative errno
 * through a size_t, which callers see as a huge positive value —
 * callers must not compare the result to @size with plain "<"/">"
 * without accounting for that.
 */
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
			iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
		 (unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
			 (unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);