@@ -27,6 +27,15 @@
 
 static struct kmem_cache *iovm_area_cachep;
 
+/* return the offset of the first scatterlist entry in a sg table */
+static unsigned int sgtable_offset(const struct sg_table *sgt)
+{
+	if (!sgt || !sgt->nents)
+		return 0;
+
+	return sgt->sgl->offset;
+}
+
 /* return total bytes of sg buffers */
 static size_t sgtable_len(const struct sg_table *sgt)
 {
@@ -39,11 +48,17 @@ static size_t sgtable_len(const struct sg_table *sgt)
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
 
-		bytes = sg->length;
+		bytes = sg->length + sg->offset;
 
 		if (!iopgsz_ok(bytes)) {
-			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
-			       __func__, i, bytes);
+			pr_err("%s: sg[%d] not iommu pagesize(%zu %u)\n",
+			       __func__, i, bytes, sg->offset);
+			return 0;
+		}
+
+		if (i && sg->offset) {
+			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
+			       __func__, i);
 			return 0;
 		}
 
@@ -164,8 +179,8 @@ static void *vmap_sg(const struct sg_table *sgt)
 		u32 pa;
 		int err;
 
-		pa = sg_phys(sg);
-		bytes = sg->length;
+		pa = sg_phys(sg) - sg->offset;
+		bytes = sg->length + sg->offset;
 
 		BUG_ON(bytes != PAGE_SIZE);
 
@@ -405,8 +420,8 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 		u32 pa;
 		size_t bytes;
 
-		pa = sg_phys(sg);
-		bytes = sg->length;
+		pa = sg_phys(sg) - sg->offset;
+		bytes = sg->length + sg->offset;
 
 		flags &= ~IOVMF_PGSZ_MASK;
 
@@ -432,7 +447,7 @@ err_out:
 	for_each_sg(sgt->sgl, sg, i, j) {
 		size_t bytes;
 
-		bytes = sg->length;
+		bytes = sg->length + sg->offset;
 		order = get_order(bytes);
 
 		/* ignore failures.. we're already handling one */
@@ -461,7 +476,7 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 		size_t bytes;
 		int order;
 
-		bytes = sg->length;
+		bytes = sg->length + sg->offset;
 		order = get_order(bytes);
 
 		err = iommu_unmap(domain, start, order);
@@ -600,7 +615,7 @@ u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
 	if (IS_ERR_VALUE(da))
 		vunmap_sg(va);
 
-	return da;
+	return da + sgtable_offset(sgt);
 }
 EXPORT_SYMBOL_GPL(omap_iommu_vmap);
 
@@ -620,6 +635,7 @@ omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
 	 * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
 	 * Just returns 'sgt' to the caller to free
 	 */
+	da &= PAGE_MASK;
 	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
 				IOVMF_DISCONT | IOVMF_MMIO);
 	if (!sgt)
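
A minimal caller sketch of the patched behaviour follows; it is illustrative only,
not part of the patch. The 'domain' and 'obj' handles and the get_buffer_sgt()
helper are hypothetical placeholders; omap_iommu_vmap(), omap_iommu_vunmap() and
the IOVMF_* flags are the driver's own. The point shown: vmap now folds the first
scatterlist entry's offset into the returned device address, and vunmap masks it
back off (da &= PAGE_MASK) before looking up the mapped area.

	struct sg_table *sgt = get_buffer_sgt();	/* sgt->sgl->offset may be non-zero */
	u32 da;

	da = omap_iommu_vmap(domain, obj, 0, sgt, IOVMF_DISCONT | IOVMF_MMIO);
	if (IS_ERR_VALUE(da))
		return da;

	/* da carries sgtable_offset(sgt): it points at the data itself,
	 * not at the start of the backing page. */

	sgt = omap_iommu_vunmap(domain, obj, da);	/* offset is masked off internally */
	/* the caller is responsible for freeing sgt */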