@@ -43,6 +43,7 @@
 #include <xen/xen-ops.h>
 #include <xen/hvc-console.h>
 #include <asm/dma-mapping.h>
+#include <asm/xen/page-coherent.h>
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
@@ -142,6 +143,7 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 	int i, rc;
 	int dma_bits;
 	dma_addr_t dma_handle;
+	phys_addr_t p = virt_to_phys(buf);
 
 	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
 
@@ -151,7 +153,7 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 
 		do {
 			rc = xen_create_contiguous_region(
-				(unsigned long)buf + (i << IO_TLB_SHIFT),
+				p + (i << IO_TLB_SHIFT),
 				get_order(slabs << IO_TLB_SHIFT),
 				dma_bits, &dma_handle);
 		} while (rc && dma_bits++ < max_dma_bits);
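The loop above now hands xen_create_contiguous_region() a physical address (p plus the segment offset) instead of a kernel virtual address. For reference, a minimal sketch of the phys_addr_t-based prototypes these call sites appear to assume; the authoritative declarations live in include/xen/xen-ops.h and may differ:

/* Sketch only: prototypes as assumed by the call sites in this patch. */
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);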
@@ -279,7 +281,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	void *ret;
 	int order = get_order(size);
 	u64 dma_mask = DMA_BIT_MASK(32);
-	unsigned long vstart;
 	phys_addr_t phys;
 	dma_addr_t dev_addr;
 
@@ -294,8 +295,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
 		return ret;
 
-	vstart = __get_free_pages(flags, order);
-	ret = (void *)vstart;
+	/* On ARM this function returns an ioremap'ed virtual address, for
+	 * which virt_to_phys does not return the corresponding physical
+	 * address: on ARM virt_to_phys only works for kernel direct-mapped
+	 * RAM. Also see the comment below.
+	 */
+	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
 
 	if (!ret)
 		return ret;
@@ -303,15 +308,19 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 
-	phys = virt_to_phys(ret);
+	/* At this point *dma_handle holds the physical address; below it is
+	 * replaced with the machine address.
+	 * Do not use virt_to_phys(ret), because on ARM it does not
+	 * correspond to *dma_handle. */
+	phys = *dma_handle;
 	dev_addr = xen_phys_to_bus(phys);
 	if (((dev_addr + size - 1 <= dma_mask)) &&
 	    !range_straddles_page_boundary(phys, size))
 		*dma_handle = dev_addr;
 	else {
-		if (xen_create_contiguous_region(vstart, order,
+		if (xen_create_contiguous_region(phys, order,
 						 fls64(dma_mask), dma_handle) != 0) {
-			free_pages(vstart, order);
+			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
 			return NULL;
 		}
 	}
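xen_alloc_coherent_pages() comes from the newly included <asm/xen/page-coherent.h>. As a rough, non-authoritative sketch of the idea: an x86 flavour could keep the old __get_free_pages() behaviour and report the CPU physical address through *dma_handle, which is what the comment above relies on. The helper name, parameter order and the struct dma_attrs argument are assumptions taken from the call sites in this patch; an ARM flavour would instead go through the architecture's coherent DMA allocator and return the ioremap'ed mapping described earlier.

/* Hypothetical x86 <asm/xen/page-coherent.h> helper, sketched from the
 * call sites above: allocate ordinary pages and hand back the CPU
 * physical address in *dma_handle. */
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
					     dma_addr_t *dma_handle, gfp_t flags,
					     struct dma_attrs *attrs)
{
	void *vstart = (void *)__get_free_pages(flags, get_order(size));
	*dma_handle = virt_to_phys(vstart);
	return vstart;
}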
@@ -334,13 +343,15 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = hwdev->coherent_dma_mask;
 
-	phys = virt_to_phys(vaddr);
+	/* Do not use virt_to_phys(vaddr): on ARM it does not return the
+	 * physical address here. */
+	phys = xen_bus_to_phys(dev_addr);
 
 	if (((dev_addr + size - 1 > dma_mask)) ||
 	    range_straddles_page_boundary(phys, size))
-		xen_destroy_contiguous_region((unsigned long)vaddr, order);
+		xen_destroy_contiguous_region(phys, order);
 
-	free_pages((unsigned long)vaddr, order);
+	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
 
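The free path mirrors the allocation path: the physical address is recovered from dev_addr via xen_bus_to_phys() rather than from vaddr, and the memory is returned through the per-architecture helper. A matching sketch, again assuming the hypothetical x86 flavour and the names used at the call sites above:

/* Hypothetical counterpart of the allocation helper sketched earlier:
 * release the pages obtained from xen_alloc_coherent_pages(). */
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
					   void *cpu_addr, dma_addr_t dma_handle,
					   struct dma_attrs *attrs)
{
	free_pages((unsigned long)cpu_addr, get_order(size));
}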