iommu.c

/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static ssize_t show_iommu_group(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid))
                return 0;

        return sprintf(buf, "%u", groupid);
}
static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);

static int add_iommu_group(struct device *dev, void *data)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid) == 0)
                return device_create_file(dev, &dev_attr_iommu_group);

        return 0;
}

static int remove_iommu_group(struct device *dev)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid) == 0)
                device_remove_file(dev, &dev_attr_iommu_group);

        return 0;
}

static int iommu_device_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;

        if (action == BUS_NOTIFY_ADD_DEVICE)
                return add_iommu_group(dev, NULL);
        else if (action == BUS_NOTIFY_DEL_DEVICE)
                return remove_iommu_group(dev);

        return 0;
}

static struct notifier_block iommu_device_nb = {
        .notifier_call = iommu_device_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
        bus_register_notifier(bus, &iommu_device_nb);
        bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: the bus to set the callbacks for
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 *
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        iommu_bus_init(bus, ops);

        return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
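
/*
 * Example (illustrative sketch, not part of the original file): an IOMMU
 * driver registers its callbacks once at init time. The "my_*" names and
 * the page sizes are hypothetical; the ops fields shown are the ones this
 * file actually dereferences.
 *
 *      static struct iommu_ops my_iommu_ops = {
 *              .domain_init    = my_domain_init,
 *              .domain_destroy = my_domain_destroy,
 *              .attach_dev     = my_attach_dev,
 *              .detach_dev     = my_detach_dev,
 *              .map            = my_map,
 *              .unmap          = my_unmap,
 *              .iova_to_phys   = my_iova_to_phys,
 *              .pgsize_bitmap  = (1UL << 12) | (1UL << 21),
 *      };
 *
 *      static int __init my_iommu_init(void)
 *      {
 *              return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *      }
 *
 * The pgsize_bitmap here advertises 4KiB and 2MiB pages (one bit per
 * supported size); bus_set_iommu() fails with -EBUSY if another driver
 * already claimed the bus.
 */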

bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users that want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                             iommu_fault_handler_t handler,
                             void *token)
{
        BUG_ON(!domain);

        domain->handler = handler;
        domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
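
/*
 * Example (illustrative sketch, not part of the original file): a fault
 * handler receives the faulting domain, device and iova, plus the token
 * registered above (check iommu_fault_handler_t in <linux/iommu.h> for
 * the exact signature). The handler and context names are hypothetical;
 * per the comment above, returning 0 means the fault was handled.
 *
 *      static int my_fault_handler(struct iommu_domain *domain,
 *                                  struct device *dev, unsigned long iova,
 *                                  int flags, void *token)
 *      {
 *              struct my_ctx *ctx = token;
 *
 *              dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *              return -ENOSYS;
 *      }
 *
 *      iommu_set_fault_handler(domain, my_fault_handler, ctx);
 */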

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        struct iommu_domain *domain;
        int ret;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        domain->ops = bus->iommu_ops;

        ret = domain->ops->domain_init(domain);
        if (ret)
                goto out_free;

        return domain;

out_free:
        kfree(domain);

        return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        if (likely(domain->ops->domain_destroy != NULL))
                domain->ops->domain_destroy(domain);

        kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
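
/*
 * Example (illustrative sketch, not part of the original file): the typical
 * domain lifecycle as seen by an IOMMU API user. Allocate a domain for the
 * device's bus, attach the device, map and unmap as needed (see iommu_map()
 * and iommu_unmap() below), then tear everything down in reverse order.
 *
 *      struct iommu_domain *domain;
 *      int ret;
 *
 *      domain = iommu_domain_alloc(dev->bus);
 *      if (!domain)
 *              return -ENODEV;
 *
 *      ret = iommu_attach_device(domain, dev);
 *      if (ret) {
 *              iommu_domain_free(domain);
 *              return ret;
 *      }
 *
 *      (map/unmap the device's buffers here)
 *
 *      iommu_detach_device(domain, dev);
 *      iommu_domain_free(domain);
 */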

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
                               unsigned long iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
                         unsigned long cap)
{
        if (unlikely(domain->ops->domain_has_cap == NULL))
                return 0;

        return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
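
/*
 * Example (illustrative sketch, not part of the original file): callers can
 * probe optional hardware features before relying on them, e.g. requesting
 * coherent mappings only when the domain reports cache coherency
 * (IOMMU_CAP_CACHE_COHERENCY and the IOMMU_* prot flags are defined in
 * <linux/iommu.h>):
 *
 *      int prot = IOMMU_READ | IOMMU_WRITE;
 *
 *      if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
 *              prot |= IOMMU_CACHE;
 */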

int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        int ret = 0;

        if (unlikely(domain->ops->map == NULL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz 0x%x\n",
                       iova, (unsigned long)paddr, (unsigned long)size,
                       min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
                 (unsigned long)paddr, (unsigned long)size);

        while (size) {
                unsigned long pgsize, addr_merge = iova | paddr;
                unsigned int pgsize_idx;

                /* Max page size that still fits into 'size' */
                pgsize_idx = __fls(size);

                /* need to consider alignment requirements ? */
                if (likely(addr_merge)) {
                        /* Max page size allowed by both iova and paddr */
                        unsigned int align_pgsize_idx = __ffs(addr_merge);

                        pgsize_idx = min(pgsize_idx, align_pgsize_idx);
                }

                /* build a mask of acceptable page sizes */
                pgsize = (1UL << (pgsize_idx + 1)) - 1;

                /* throw away page sizes not supported by the hardware */
                pgsize &= domain->ops->pgsize_bitmap;

                /* make sure we're still sane */
                BUG_ON(!pgsize);

                /* pick the biggest page */
                pgsize_idx = __fls(pgsize);
                pgsize = 1UL << pgsize_idx;

                pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
                         (unsigned long)paddr, pgsize);

                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
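
/*
 * Example (illustrative sketch, not part of the original file): mapping a
 * physically contiguous 64KiB buffer at a caller-chosen iova. All three of
 * iova, paddr and size must be aligned to the smallest supported page size
 * or iommu_map() returns -EINVAL; the loop above then splits the region
 * into the largest pages the hardware and the addresses' alignment permit.
 *
 *      ret = iommu_map(domain, 0x100000, page_to_phys(page), 0x10000,
 *                      IOMMU_READ | IOMMU_WRITE);
 *      if (ret)
 *              dev_err(dev, "iommu_map failed: %d\n", ret);
 */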

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;

        if (unlikely(domain->ops->unmap == NULL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
                       iova, (unsigned long)size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
                 (unsigned long)size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t left = size - unmapped;

                unmapped_page = domain->ops->unmap(domain, iova, left);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size 0x%lx\n", iova,
                         (unsigned long)unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
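
/*
 * Example (illustrative sketch, not part of the original file): because
 * iommu_unmap() stops early when it hits an unmapped hole, callers should
 * check how much was actually unmapped rather than assume the full size:
 *
 *      size_t unmapped = iommu_unmap(domain, iova, size);
 *
 *      if (unmapped < size)
 *              pr_warn("partial unmap: asked 0x%lx, got 0x%lx\n",
 *                      (unsigned long)size, (unsigned long)unmapped);
 */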

int iommu_device_group(struct device *dev, unsigned int *groupid)
{
        if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
                return dev->bus->iommu_ops->device_group(dev, groupid);

        return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_device_group);
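
/*
 * Example (illustrative sketch, not part of the original file): the group id
 * is exposed to userspace through the per-device sysfs attribute created by
 * add_iommu_group() above; devices printing the same value share an IOMMU
 * group. The device path below is hypothetical:
 *
 *      $ cat /sys/bus/pci/devices/0000:00:19.0/iommu_group
 *      5
 */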