iommu.c

/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static ssize_t show_iommu_group(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid))
                return 0;

        return sprintf(buf, "%u", groupid);
}
static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
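
/*
 * Note: the DEVICE_ATTR above backs a read-only "iommu_group" file in
 * sysfs, so userspace can read /sys/devices/.../iommu_group (path
 * illustrative) to learn which isolation group the iommu driver's
 * device_group() callback assigned to a device.
 */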

static int add_iommu_group(struct device *dev, void *data)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid) == 0)
                return device_create_file(dev, &dev_attr_iommu_group);

        return 0;
}

static int remove_iommu_group(struct device *dev)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid) == 0)
                device_remove_file(dev, &dev_attr_iommu_group);

        return 0;
}

static int iommu_device_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;

        if (action == BUS_NOTIFY_ADD_DEVICE)
                return add_iommu_group(dev, NULL);
        else if (action == BUS_NOTIFY_DEL_DEVICE)
                return remove_iommu_group(dev);

        return 0;
}

static struct notifier_block iommu_device_nb = {
        .notifier_call = iommu_device_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
        bus_register_notifier(bus, &iommu_device_nb);
        bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: the bus the iommu-ops are registered for
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        iommu_bus_init(bus, ops);

        return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
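
/*
 * Usage sketch (illustrative only, not part of this file): an iommu
 * driver registers its ops once at init time, before any client can
 * allocate a domain. All my_* names below are hypothetical:
 *
 *        static struct iommu_ops my_iommu_ops = {
 *                .domain_init    = my_domain_init,
 *                .domain_destroy = my_domain_destroy,
 *                .attach_dev     = my_attach_dev,
 *                .detach_dev     = my_detach_dev,
 *                .map            = my_map,
 *                .unmap          = my_unmap,
 *                .iova_to_phys   = my_iova_to_phys,
 *                .pgsize_bitmap  = SZ_4K | SZ_2M,
 *        };
 *
 *        static int __init my_iommu_driver_init(void)
 *        {
 *                return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *        }
 */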

bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                             iommu_fault_handler_t handler)
{
        BUG_ON(!domain);

        domain->handler = handler;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
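
/*
 * Handler sketch (illustrative; my_fault_handler is hypothetical). A
 * typical handler logs the faulting address and returns nonzero to
 * indicate the fault was not handled:
 *
 *        static int my_fault_handler(struct iommu_domain *domain,
 *                        struct device *dev, unsigned long iova, int flags)
 *        {
 *                dev_err(dev, "iommu fault at iova 0x%lx (flags 0x%x)\n",
 *                        iova, flags);
 *                return -ENOSYS;
 *        }
 *
 *        iommu_set_fault_handler(domain, my_fault_handler);
 */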

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        struct iommu_domain *domain;
        int ret;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        domain->ops = bus->iommu_ops;

        ret = domain->ops->domain_init(domain);
        if (ret)
                goto out_free;

        return domain;

out_free:
        kfree(domain);

        return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        if (likely(domain->ops->domain_destroy != NULL))
                domain->ops->domain_destroy(domain);

        kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
                               unsigned long iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
                         unsigned long cap)
{
        if (unlikely(domain->ops->domain_has_cap == NULL))
                return 0;

        return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        int ret = 0;

        if (unlikely(domain->ops->map == NULL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz 0x%x\n",
                       iova, (unsigned long)paddr, (unsigned long)size,
                       min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
                 (unsigned long)paddr, (unsigned long)size);

        while (size) {
                unsigned long pgsize, addr_merge = iova | paddr;
                unsigned int pgsize_idx;

                /* Max page size that still fits into 'size' */
                pgsize_idx = __fls(size);

                /* need to consider alignment requirements ? */
                if (likely(addr_merge)) {
                        /* Max page size allowed by both iova and paddr */
                        unsigned int align_pgsize_idx = __ffs(addr_merge);

                        pgsize_idx = min(pgsize_idx, align_pgsize_idx);
                }

                /* build a mask of acceptable page sizes */
                pgsize = (1UL << (pgsize_idx + 1)) - 1;

                /* throw away page sizes not supported by the hardware */
                pgsize &= domain->ops->pgsize_bitmap;

                /* make sure we're still sane */
                BUG_ON(!pgsize);

                /* pick the biggest page */
                pgsize_idx = __fls(pgsize);
                pgsize = 1UL << pgsize_idx;

                pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
                         (unsigned long)paddr, pgsize);

                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
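
/*
 * Worked example of the loop above (numbers illustrative): with
 * pgsize_bitmap = SZ_4K | SZ_2M, iova = paddr = 0x200000 and
 * size = 0x201000, the first iteration sees addr_merge = 0x200000,
 * so both the __ffs() alignment check and __fls(size) permit a 2MB
 * page, which gets mapped. The second iteration has size = 0x1000
 * left, so only a 4KB page fits, and the mapping completes in two
 * ops->map() calls instead of 513 single-page ones.
 */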

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;

        /* return 0, not a negative errno: the return type is unsigned */
        if (unlikely(domain->ops->unmap == NULL))
                return 0;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
                       iova, (unsigned long)size, min_pagesz);
                return 0;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
                 (unsigned long)size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t left = size - unmapped;

                unmapped_page = domain->ops->unmap(domain, iova, left);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size 0x%lx\n", iova,
                         (unsigned long)unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
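
/*
 * Putting the consumer-facing API together (illustrative sketch;
 * my_use_iommu and its parameters are hypothetical, error paths kept
 * minimal). After the iommu_map() call succeeds, the device can DMA
 * within [iova, iova + SZ_4K):
 *
 *        static int my_use_iommu(struct device *dev, unsigned long iova,
 *                        phys_addr_t paddr)
 *        {
 *                struct iommu_domain *domain;
 *                int ret;
 *
 *                domain = iommu_domain_alloc(dev->bus);
 *                if (!domain)
 *                        return -ENODEV;
 *
 *                ret = iommu_attach_device(domain, dev);
 *                if (ret)
 *                        goto out_free;
 *
 *                ret = iommu_map(domain, iova, paddr, SZ_4K,
 *                                IOMMU_READ | IOMMU_WRITE);
 *                if (ret)
 *                        goto out_detach;
 *
 *                iommu_unmap(domain, iova, SZ_4K);
 *        out_detach:
 *                iommu_detach_device(domain, dev);
 *        out_free:
 *                iommu_domain_free(domain);
 *                return ret;
 *        }
 */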

int iommu_device_group(struct device *dev, unsigned int *groupid)
{
        if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
                return dev->bus->iommu_ops->device_group(dev, groupid);

        return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_device_group);