@@ -42,12 +42,31 @@
 #include <xen/page.h>
 #include <xen/xen-ops.h>
 #include <xen/hvc-console.h>
+
+#include <asm/dma-mapping.h>
+#include <asm/xen/page-coherent.h>
+
+#include <trace/events/swiotlb.h>
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 
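+/* On x86 an equivalent dma_alloc_coherent_mask() helper already exists in
+ * <asm/dma-mapping.h>; provide a fallback here for the other architectures
+ * (notably ARM) so the coherent-allocation path below can derive a usable
+ * mask from the device. */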
+#ifndef CONFIG_X86
+static unsigned long dma_alloc_coherent_mask(struct device *dev,
+					    gfp_t gfp)
+{
+	unsigned long dma_mask = 0;
+
+	dma_mask = dev->coherent_dma_mask;
+	if (!dma_mask)
+		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
+
+	return dma_mask;
+}
+#endif
+
 static char *xen_io_tlb_start, *xen_io_tlb_end;
 static unsigned long xen_io_tlb_nslabs;
 /*
@@ -56,17 +75,17 @@ static unsigned long xen_io_tlb_nslabs;
 
 static u64 start_dma_addr;
 
-static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
 	return phys_to_machine(XPADDR(paddr)).maddr;
 }
 
-static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
+static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
 	return machine_to_phys(XMADDR(baddr)).paddr;
 }
 
-static dma_addr_t xen_virt_to_bus(void *address)
+static inline dma_addr_t xen_virt_to_bus(void *address)
 {
 	return xen_phys_to_bus(virt_to_phys(address));
 }
@@ -89,7 +108,7 @@ static int check_pages_physically_contiguous(unsigned long pfn,
 	return 1;
 }
 
-static int range_straddles_page_boundary(phys_addr_t p, size_t size)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
 	unsigned long pfn = PFN_DOWN(p);
 	unsigned int offset = p & ~PAGE_MASK;
@@ -126,6 +145,8 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 {
 	int i, rc;
 	int dma_bits;
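+	/* xen_create_contiguous_region() now takes a physical address and
+	 * returns the machine address of the exchanged region through
+	 * dma_handle; buf comes from the kernel direct mapping, so
+	 * virt_to_phys() is safe here. */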
+	dma_addr_t dma_handle;
+	phys_addr_t p = virt_to_phys(buf);
 
 	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
 
@@ -135,9 +156,9 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 
 		do {
 			rc = xen_create_contiguous_region(
-				(unsigned long)buf + (i << IO_TLB_SHIFT),
+				p + (i << IO_TLB_SHIFT),
 				get_order(slabs << IO_TLB_SHIFT),
-				dma_bits);
+				dma_bits, &dma_handle);
 		} while (rc && dma_bits++ < max_dma_bits);
 		if (rc)
 			return rc;
@@ -263,7 +284,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	void *ret;
 	int order = get_order(size);
 	u64 dma_mask = DMA_BIT_MASK(32);
-	unsigned long vstart;
 	phys_addr_t phys;
 	dma_addr_t dev_addr;
 
@@ -278,8 +298,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
 		return ret;
 
-	vstart = __get_free_pages(flags, order);
-	ret = (void *)vstart;
+	/* On ARM this function returns an ioremap'ped virtual address for
+	 * which virt_to_phys doesn't return the corresponding physical
+	 * address. In fact on ARM virt_to_phys only works for kernel direct
+	 * mapped RAM memory. Also see comment below.
+	 */
+	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
 
 	if (!ret)
 		return ret;
@@ -287,18 +311,21 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 
-	phys = virt_to_phys(ret);
+	/* At this point dma_handle is the physical address, next we are
+	 * going to set it to the machine address.
+	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
+	 * to *dma_handle. */
+	phys = *dma_handle;
 	dev_addr = xen_phys_to_bus(phys);
 	if (((dev_addr + size - 1 <= dma_mask)) &&
 	    !range_straddles_page_boundary(phys, size))
 		*dma_handle = dev_addr;
 	else {
-		if (xen_create_contiguous_region(vstart, order,
-						 fls64(dma_mask)) != 0) {
-			free_pages(vstart, order);
+		if (xen_create_contiguous_region(phys, order,
+						 fls64(dma_mask), dma_handle) != 0) {
+			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
 			return NULL;
 		}
-		*dma_handle = virt_to_machine(ret).maddr;
 	}
 	memset(ret, 0, size);
 	return ret;
@@ -319,13 +346,15 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = hwdev->coherent_dma_mask;
 
-	phys = virt_to_phys(vaddr);
+	/* do not use virt_to_phys because on ARM it doesn't return the
+	 * physical address */
+	phys = xen_bus_to_phys(dev_addr);
 
 	if (((dev_addr + size - 1 > dma_mask)) ||
 	    range_straddles_page_boundary(phys, size))
-		xen_destroy_contiguous_region((unsigned long)vaddr, order);
+		xen_destroy_contiguous_region(phys, order);
 
-	free_pages((unsigned long)vaddr, order);
+	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
 
@@ -352,16 +381,25 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * buffering it.
 	 */
 	if (dma_capable(dev, dev_addr, size) &&
-	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
+	    !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+		/* we are not interested in the dma_addr returned by
+		 * xen_dma_map_page, only in the potential cache flushes executed
+		 * by the function. */
+		xen_dma_map_page(dev, page, offset, size, dir, attrs);
 		return dev_addr;
+	}
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
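+	/* let the swiotlb tracepoint record that this request bounced */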
+	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
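+	/* perform the arch-specific cache maintenance on the bounce page
+	 * too, just like in the fast path above */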
+	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
+			 map & ~PAGE_MASK, size, dir, attrs);
 	dev_addr = xen_phys_to_bus(map);
 
 	/*
@@ -384,12 +422,15 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			     size_t size, enum dma_data_direction dir)
+			     size_t size, enum dma_data_direction dir,
+			     struct dma_attrs *attrs)
 {
 	phys_addr_t paddr = xen_bus_to_phys(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
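+	/* give the architecture a chance to perform its cache maintenance
+	 * now that the CPU is taking the buffer back from the device */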
+	xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr)) {
 		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
@@ -412,7 +453,7 @@ void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			    size_t size, enum dma_data_direction dir,
 			    struct dma_attrs *attrs)
 {
-	xen_unmap_single(hwdev, dev_addr, size, dir);
+	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
 
@@ -435,11 +476,15 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
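+	/* the arch-specific maintenance has to run before the bounce-buffer
+	 * copy when syncing for the CPU and after it when syncing for the
+	 * device, hence the two guarded calls around the swiotlb sync */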
+	if (target == SYNC_FOR_CPU)
+		xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+
 	/* NOTE: We use dev_addr here, not paddr! */
-	if (is_xen_swiotlb_buffer(dev_addr)) {
+	if (is_xen_swiotlb_buffer(dev_addr))
 		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-		return;
-	}
+
+	if (target == SYNC_FOR_DEVICE)
+		xen_dma_sync_single_for_device(hwdev, paddr, size, dir);
 
 	if (dir != DMA_FROM_DEVICE)
 		return;
@@ -502,16 +547,26 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 								 sg->length,
 								 dir);
 			if (map == SWIOTLB_MAP_ERROR) {
+				dev_warn(hwdev, "swiotlb buffer is full\n");
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 							   attrs);
 				sg_dma_len(sgl) = 0;
-				return DMA_ERROR_CODE;
+				return 0;
 			}
 			sg->dma_address = xen_phys_to_bus(map);
-		} else
+		} else {
+			/* we are not interested in the dma_addr returned by
+			 * xen_dma_map_page, only in the potential cache flushes executed
+			 * by the function. */
+			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+					 paddr & ~PAGE_MASK,
+					 sg->length,
+					 dir,
+					 attrs);
 			sg->dma_address = dev_addr;
+		}
 		sg_dma_len(sg) = sg->length;
 	}
 	return nelems;
@@ -533,7 +588,7 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i)
-		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
+		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
 
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
@@ -593,3 +648,15 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
+
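+/* accept a new mask only if every possible bounce-buffer address in the
+ * swiotlb aperture is reachable under it */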
+int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
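
A minimal caller-side sketch (annotation, not part of the patch): an architecture's dma_set_mask() path could dispatch to the new hook when running on Xen, so device masks are validated against the swiotlb aperture as well. The arch_set_dma_mask() name is an illustrative assumption; xen_domain() and dma_supported() are existing kernel interfaces.

	/* hypothetical arch-level dispatch for dma_set_mask() */
	int arch_set_dma_mask(struct device *dev, u64 mask)
	{
		/* under Xen the mask must also cover the bounce buffers */
		if (xen_domain())
			return xen_swiotlb_set_dma_mask(dev, mask);

		if (!dev->dma_mask || !dma_supported(dev, mask))
			return -EIO;

		*dev->dma_mask = mask;
		return 0;
	}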