Merge branch 'iommu/page-sizes' into x86/amd

Conflicts:
	drivers/iommu/amd_iommu.c
Joerg Roedel 13 years ago
parent
commit
a06ec394c9

drivers/iommu/amd_iommu.c  +24 -8

@@ -44,6 +44,24 @@
 
 #define LOOP_TIMEOUT	100000
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
+
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
 /* A list of preallocated protection domains */
@@ -3093,9 +3111,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 }
 
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-			 phys_addr_t paddr, int gfp_order, int iommu_prot)
+			 phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-	unsigned long page_size = 0x1000UL << gfp_order;
 	struct protection_domain *domain = dom->priv;
 	int prot = 0;
 	int ret;
@@ -3115,24 +3132,22 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	return ret;
 }
 
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			   int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+			   size_t page_size)
 {
 	struct protection_domain *domain = dom->priv;
-	unsigned long page_size, unmap_size;
+	size_t unmap_size;
 
 	if (domain->mode == PAGE_MODE_NONE)
 		return -EINVAL;
 
-	page_size  = 0x1000UL << gfp_order;
-
 	mutex_lock(&domain->api_lock);
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
 	domain_flush_tlb_pde(domain);
 
-	return get_order(unmap_size);
+	return unmap_size;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -3182,6 +3197,7 @@ static struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
+	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 
 /*****************************************************************************
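The ~0xFFFUL value above advertises every power-of-two size from 4KiB upwards, i.e. it preserves the old "any order of 4KiB" behaviour of the IOMMU core. As a minimal userspace sketch (not part of the patch; check_pgsize() is a made-up helper), this is what such a bitmap allows compared with a sparse MSM/OMAP-style one:

#include <stdio.h>

/* Made-up helper: report whether a pgsize_bitmap allows a given page size.
 * A (power-of-two) size is allowed when its bit is set in the bitmap. */
static void check_pgsize(const char *name, unsigned long bitmap,
			 unsigned long pgsize)
{
	printf("%-28s %9lx-byte pages: %s\n", name, pgsize,
	       (bitmap & pgsize) ? "yes" : "no");
}

int main(void)
{
	/* ~0xFFFUL: every power of two >= 4KiB, preserving the old
	 * "any order of 4KiB" behaviour of the IOMMU core. */
	unsigned long amd_style = ~0xFFFUL;
	/* Sparse bitmap as used by MSM/OMAP: only real hardware sizes. */
	unsigned long sparse = 0x1000UL | 0x10000UL | 0x100000UL | 0x1000000UL;
	unsigned long sizes[] = { 0x1000, 0x2000, 0x10000, 0x200000, 0x1000000 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		check_pgsize("AMD/Intel style (~0xFFFUL)", amd_style, sizes[i]);
		check_pgsize("MSM/OMAP style", sparse, sizes[i]);
	}
	return 0;
}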

drivers/iommu/intel-iommu.c  +23 -7

@@ -78,6 +78,24 @@
 #define LEVEL_STRIDE		(9)
 #define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
+
 static inline int agaw_to_level(int agaw)
 {
 	return agaw + 2;
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
-			   int gfp_order, int iommu_prot)
+			   size_t size, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int prot = 0;
-	size_t size;
 	int ret;
 
 	if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;
 
-	size     = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-			     unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+			     unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	size_t size = PAGE_SIZE << gfp_order;
 	int order;
 
 	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return order;
+	return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4069,6 +4084,7 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap = intel_iommu_domain_has_cap,
+	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
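intel_iommu_unmap() still works in whole page orders internally (dma_pte_clear_range() reports an order), so only the conversion at the API boundary changes: the caller now passes and receives byte counts. A small sketch of that order/byte relationship, assuming 4KiB pages and a simplified stand-in for the kernel's get_order():

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Bytes covered by an order-'order' region: PAGE_SIZE << order. */
static size_t order_to_bytes(int order)
{
	return PAGE_SIZE << order;
}

/* Smallest order whose size covers 'bytes' (simplified get_order()). */
static int bytes_to_order(size_t bytes)
{
	int order = 0;

	while (order_to_bytes(order) < bytes)
		order++;
	return order;
}

int main(void)
{
	/* Old interface: callers passed an order; the new one passes bytes. */
	assert(order_to_bytes(0) == 0x1000);	/* one 4KiB page    */
	assert(order_to_bytes(9) == 0x200000);	/* a 2MiB superpage */
	assert(bytes_to_order(0x200000) == 9);

	/* Returning bytes lets unmap report regions that are not a single
	 * power-of-two page, which an order cannot express. */
	return 0;
}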

drivers/iommu/iommu.c  +107 -12

@@ -16,6 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
+#define pr_fmt(fmt)    "%s: " fmt, __func__
+
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
@@ -157,32 +159,125 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
-	      phys_addr_t paddr, int gfp_order, int prot)
+	      phys_addr_t paddr, size_t size, int prot)
 {
-	size_t size;
+	unsigned long orig_iova = iova;
+	unsigned int min_pagesz;
+	size_t orig_size = size;
+	int ret = 0;
 
 	if (unlikely(domain->ops->map == NULL))
 		return -ENODEV;
 
-	size         = PAGE_SIZE << gfp_order;
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * both the virtual address and the physical one, as well as
+	 * the size of the mapping, must be aligned (at least) to the
+	 * size of the smallest page supported by the hardware
+	 */
+	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+			"0x%x\n", iova, (unsigned long)paddr,
+			(unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+				(unsigned long)paddr, (unsigned long)size);
+
+	while (size) {
+		unsigned long pgsize, addr_merge = iova | paddr;
+		unsigned int pgsize_idx;
+
+		/* Max page size that still fits into 'size' */
+		pgsize_idx = __fls(size);
+
+		/* need to consider alignment requirements ? */
+		if (likely(addr_merge)) {
+			/* Max page size allowed by both iova and paddr */
+			unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+		}
+
+		/* build a mask of acceptable page sizes */
+		pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+		/* throw away page sizes not supported by the hardware */
+		pgsize &= domain->ops->pgsize_bitmap;
 
-	BUG_ON(!IS_ALIGNED(iova | paddr, size));
+		/* make sure we're still sane */
+		BUG_ON(!pgsize);
 
-	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+		/* pick the biggest page */
+		pgsize_idx = __fls(pgsize);
+		pgsize = 1UL << pgsize_idx;
+
+		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+					(unsigned long)paddr, pgsize);
+
+		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+		if (ret)
+			break;
+
+		iova += pgsize;
+		paddr += pgsize;
+		size -= pgsize;
+	}
+
+	/* unroll mapping in case something went wrong */
+	if (ret)
+		iommu_unmap(domain, orig_iova, orig_size - size);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 {
-	size_t size;
+	size_t unmapped_page, unmapped = 0;
+	unsigned int min_pagesz;
 
 	if (unlikely(domain->ops->unmap == NULL))
 		return -ENODEV;
 
-	size         = PAGE_SIZE << gfp_order;
-
-	BUG_ON(!IS_ALIGNED(iova, size));
-
-	return domain->ops->unmap(domain, iova, gfp_order);
+	/* find out the minimum page size supported */
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	/*
+	 * The virtual address, as well as the size of the mapping, must be
+	 * aligned (at least) to the size of the smallest page supported
+	 * by the hardware
+	 */
+	if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+					iova, (unsigned long)size, min_pagesz);
+		return -EINVAL;
+	}
+
+	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+							(unsigned long)size);
+
+	/*
+	 * Keep iterating until we either unmap 'size' bytes (or more)
+	 * or we hit an area that isn't mapped.
+	 */
+	while (unmapped < size) {
+		size_t left = size - unmapped;
+
+		unmapped_page = domain->ops->unmap(domain, iova, left);
+		if (!unmapped_page)
+			break;
+
+		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+					(unsigned long)unmapped_page);
+
+		iova += unmapped_page;
+		unmapped += unmapped_page;
+	}
+
+	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
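The new iommu_map() loop above is the heart of the patch: each iteration picks the largest page size that fits the remaining length, matches the current alignment of both iova and paddr, and is actually advertised in pgsize_bitmap. Below is a standalone userspace sketch of that selection logic, using GCC builtins in place of the kernel's __fls()/__ffs() and a sparse MSM/OMAP-style bitmap; the name pick_pgsize() is hypothetical:

#include <stdio.h>

/* Largest supported page size usable for the next chunk of a mapping,
 * mirroring the selection logic in the new iommu_map() loop. */
static unsigned long pick_pgsize(unsigned long iova, unsigned long paddr,
				 unsigned long size, unsigned long pgsize_bitmap)
{
	/* Max page size that still fits into 'size' (i.e. __fls(size)) */
	unsigned int pgsize_idx = 8 * sizeof(unsigned long) - 1 -
				  __builtin_clzl(size);
	unsigned long addr_merge = iova | paddr;
	unsigned long pgsize;

	/* Max page size allowed by the alignment of iova and paddr */
	if (addr_merge) {
		unsigned int align_idx = __builtin_ctzl(addr_merge); /* __ffs() */

		if (align_idx < pgsize_idx)
			pgsize_idx = align_idx;
	}

	/* Keep only sizes <= 2^pgsize_idx that the hardware advertises;
	 * the kernel BUG()s if nothing is left, assumed non-empty here. */
	pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

	/* Pick the biggest remaining page size */
	return 1UL << (8 * sizeof(unsigned long) - 1 - __builtin_clzl(pgsize));
}

int main(void)
{
	unsigned long bitmap = 0x1000 | 0x10000 | 0x100000 | 0x1000000;
	unsigned long iova = 0x100000, paddr = 0x40000000, size = 0x111000;

	/* Walk a 0x111000-byte region the way iommu_map() would:
	 * one 1MiB page, one 64KiB page, one 4KiB page. */
	while (size) {
		unsigned long pgsize = pick_pgsize(iova, paddr, size, bitmap);

		printf("map iova 0x%lx -> pa 0x%lx with a 0x%lx page\n",
		       iova, paddr, pgsize);
		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}
	return 0;
}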

drivers/iommu/msm_iommu.c  +12 -13

@@ -42,6 +42,9 @@ __asm__ __volatile__ (							\
 #define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
 #define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
 
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 static int msm_iommu_tex_class[4];
 
 DEFINE_SPINLOCK(msm_iommu_lock);
@@ -352,7 +355,7 @@ fail:
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t len, int prot)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -363,7 +366,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
 	unsigned int pgprot;
-	size_t len = 0x1000UL << order;
 	int ret = 0, tex, sh;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +465,8 @@ fail:
 	return ret;
 }
 
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
-			    int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			    size_t len)
 {
 	struct msm_priv *priv;
 	unsigned long flags;
@@ -474,7 +476,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 	unsigned long *sl_table;
 	unsigned long *sl_pte;
 	unsigned long sl_offset;
-	size_t len = 0x1000UL << order;
 	int i, ret = 0;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +545,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
 	ret = __flush_iotlb(domain);
 
-	/*
-	 * the IOMMU API requires us to return the order of the unmapped
-	 * page (on success).
-	 */
-	if (!ret)
-		ret = order;
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
-	return ret;
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -684,7 +682,8 @@ static struct iommu_ops msm_iommu_ops = {
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
 	.iova_to_phys = msm_iommu_iova_to_phys,
-	.domain_has_cap = msm_iommu_domain_has_cap
+	.domain_has_cap = msm_iommu_domain_has_cap,
+	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 };
 
 static int __init get_tex_class(int icp, int ocp, int mt, int nos)

drivers/iommu/omap-iommu.c  +9 -9

@@ -33,6 +33,9 @@
 	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
 	     __i++)
 
+/* bitmap of the page sizes currently supported */
+#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
 /**
  * struct omap_iommu_domain - omap iommu domain
  * @pgtable:	the page table
@@ -1019,12 +1022,11 @@ static void iopte_cachep_ctor(void *iopte)
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-			 phys_addr_t pa, int order, int prot)
+			 phys_addr_t pa, size_t bytes, int prot)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t bytes = PAGE_SIZE << order;
 	struct iotlb_entry e;
 	int omap_pgsz;
 	u32 ret, flags;
@@ -1049,19 +1051,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 	return ret;
 }
 
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-			    int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			    size_t size)
 {
 	struct omap_iommu_domain *omap_domain = domain->priv;
 	struct omap_iommu *oiommu = omap_domain->iommu_dev;
 	struct device *dev = oiommu->dev;
-	size_t unmap_size;
-
-	dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
 
-	unmap_size = iopgtable_clear_entry(oiommu, da);
+	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
 
-	return unmap_size ? get_order(unmap_size) : -EINVAL;
+	return iopgtable_clear_entry(oiommu, da);
 }
 
 static int
@@ -1211,6 +1210,7 @@ static struct iommu_ops omap_iommu_ops = {
 	.unmap		= omap_iommu_unmap,
 	.iova_to_phys	= omap_iommu_iova_to_phys,
 	.domain_has_cap	= omap_iommu_domain_has_cap,
+	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
 };
 
 static int __init omap_iommu_init(void)

drivers/iommu/omap-iovmm.c  +6 -11

@@ -410,7 +410,6 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 	unsigned int i, j;
 	struct scatterlist *sg;
 	u32 da = new->da_start;
-	int order;
 
 	if (!domain || !sgt)
 		return -EINVAL;
@@ -429,12 +428,10 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 		if (bytes_to_iopgsz(bytes) < 0)
 			goto err_out;
 
-		order = get_order(bytes);
-
 		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
 			 i, da, pa, bytes);
 
-		err = iommu_map(domain, da, pa, order, flags);
+		err = iommu_map(domain, da, pa, bytes, flags);
 		if (err)
 			goto err_out;
 
@@ -449,10 +446,9 @@ err_out:
 		size_t bytes;
 
 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);
 
 		/* ignore failures.. we're already handling one */
-		iommu_unmap(domain, da, order);
+		iommu_unmap(domain, da, bytes);
 
 		da += bytes;
 	}
@@ -467,7 +463,8 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	size_t total = area->da_end - area->da_start;
 	const struct sg_table *sgt = area->sgt;
 	struct scatterlist *sg;
-	int i, err;
+	int i;
+	size_t unmapped;
 
 	BUG_ON(!sgtable_ok(sgt));
 	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
@@ -475,13 +472,11 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 	start = area->da_start;
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
-		int order;
 
 		bytes = sg->length + sg->offset;
-		order = get_order(bytes);
 
-		err = iommu_unmap(domain, start, order);
-		if (err < 0)
+		unmapped = iommu_unmap(domain, start, bytes);
+		if (unmapped < bytes)
 			break;
 
 		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
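Callers such as unmap_iovm_area() above now pass the byte length of each scatterlist entry straight to iommu_unmap() and treat a return value smaller than the request as a partial unmap. A minimal sketch of that calling pattern, with fake_iommu_unmap() (made up here) standing in for iommu_unmap() on a range that was only partially mapped:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for iommu_unmap(): pretend only the first 64KiB of the range
 * was actually mapped, so only that much can be torn down. */
static size_t fake_iommu_unmap(unsigned long iova, size_t size)
{
	size_t mapped = 0x10000;

	(void)iova;
	return size < mapped ? size : mapped;
}

int main(void)
{
	unsigned long da = 0x200000;
	size_t bytes = 0x40000;		/* one scatterlist entry, say */
	size_t unmapped = fake_iommu_unmap(da, bytes);

	/* New convention: compare the returned byte count against the
	 * request instead of testing for a negative error code. */
	if (unmapped < bytes)
		printf("partial unmap at 0x%lx: wanted 0x%zx, got 0x%zx\n",
		       da, bytes, unmapped);
	return 0;
}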

include/linux/iommu.h  +20 -6

@@ -48,19 +48,33 @@ struct iommu_domain {
 
 #ifdef CONFIG_IOMMU_API
 
+/**
+ * struct iommu_ops - iommu ops and capabilities
+ * @domain_init: init iommu domain
+ * @domain_destroy: destroy iommu domain
+ * @attach_dev: attach device to an iommu domain
+ * @detach_dev: detach device from an iommu domain
+ * @map: map a physically contiguous memory region to an iommu domain
+ * @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @iova_to_phys: translate iova to physical address
+ * @domain_has_cap: domain capabilities query
+ * @commit: commit iommu domain
+ * @pgsize_bitmap: bitmap of supported page sizes
+ */
 struct iommu_ops {
 	int (*domain_init)(struct iommu_domain *domain);
 	void (*domain_destroy)(struct iommu_domain *domain);
 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
 	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
 	int (*map)(struct iommu_domain *domain, unsigned long iova,
-		   phys_addr_t paddr, int gfp_order, int prot);
-	int (*unmap)(struct iommu_domain *domain, unsigned long iova,
-		     int gfp_order);
+		   phys_addr_t paddr, size_t size, int prot);
+	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
+		     size_t size);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    unsigned long iova);
 	int (*domain_has_cap)(struct iommu_domain *domain,
 			      unsigned long cap);
+	unsigned long pgsize_bitmap;
 };
 
 extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
@@ -72,9 +86,9 @@ extern int iommu_attach_device(struct iommu_domain *domain,
 extern void iommu_detach_device(struct iommu_domain *domain,
 				struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
-		     phys_addr_t paddr, int gfp_order, int prot);
-extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-		       int gfp_order);
+		     phys_addr_t paddr, size_t size, int prot);
+extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+		       size_t size);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 				      unsigned long iova);
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
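For drivers, the updated struct iommu_ops contract is: map() receives a single page of one of the advertised sizes with iova and paddr aligned to it, unmap() receives a byte size and returns how many bytes were actually unmapped, and pgsize_bitmap advertises the sizes map() is prepared to see. A hedged sketch of a hypothetical driver (all foo_* names and FOO_IOMMU_PGSIZES are made up) wiring that up against the header as patched above:

#include <linux/iommu.h>

/* Hypothetical driver: advertise only the page sizes its page-table
 * format really implements, here 4KiB and 2MiB. */
#define FOO_IOMMU_PGSIZES	(0x1000UL | 0x200000UL)

static int foo_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	/* The core now guarantees 'size' is one of the advertised sizes and
	 * that iova/paddr are aligned to it: one descriptor write suffices. */
	return 0;	/* real page-table update elided */
}

static size_t foo_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t size)
{
	/* Return the number of bytes actually torn down (0 if nothing was
	 * mapped); iommu_unmap() keeps calling until 'size' is covered. */
	return 0x1000;	/* real page-table lookup elided */
}

static struct iommu_ops foo_iommu_ops = {
	.map		= foo_iommu_map,
	.unmap		= foo_iommu_unmap,
	.pgsize_bitmap	= FOO_IOMMU_PGSIZES,
};

Such an ops structure would then typically be handed to bus_set_iommu(), as the existing drivers in this series do for their buses.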

virt/kvm/iommu.c  +4 -4

@@ -113,7 +113,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 
 		/* Map into IO address space */
 		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
-			      get_order(page_size), flags);
+			      page_size, flags);
 		if (r) {
 			printk(KERN_ERR "kvm_iommu_map_address:"
 			       "iommu failed to map pfn=%llx\n", pfn);
@@ -292,15 +292,15 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 
 	while (gfn < end_gfn) {
 		unsigned long unmap_pages;
-		int order;
+		size_t size;
 
 		/* Get physical address */
 		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
 		pfn  = phys >> PAGE_SHIFT;
 
 		/* Unmap address from IO address space */
-		order       = iommu_unmap(domain, gfn_to_gpa(gfn), 0);
-		unmap_pages = 1ULL << order;
+		size       = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
+		unmap_pages = 1ULL << get_order(size);
 
 		/* Unpin all pages we just unmapped to not leak any memory */
 		kvm_unpin_pages(kvm, pfn, unmap_pages);