@@ -38,6 +38,7 @@
 #include <xen/swiotlb-xen.h>
 #include <xen/page.h>
 #include <xen/xen-ops.h>
+#include <xen/hvc-console.h>
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
@@ -146,8 +147,10 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 void __init xen_swiotlb_init(int verbose)
 {
         unsigned long bytes;
-        int rc;
+        int rc = -ENOMEM;
         unsigned long nr_tbl;
+        char *m = NULL;
+        unsigned int repeat = 3;

         nr_tbl = swioltb_nr_tbl();
         if (nr_tbl)
@@ -156,16 +159,17 @@ void __init xen_swiotlb_init(int verbose)
                 xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                 xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
         }
-
+retry:
         bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;

         /*
          * Get IO TLB memory from any location.
          */
         xen_io_tlb_start = alloc_bootmem(bytes);
-        if (!xen_io_tlb_start)
-                panic("Cannot allocate SWIOTLB buffer");
-
+        if (!xen_io_tlb_start) {
+                m = "Cannot allocate Xen-SWIOTLB buffer!\n";
+                goto error;
+        }
         xen_io_tlb_end = xen_io_tlb_start + bytes;
         /*
          * And replace that memory with pages under 4GB.
@@ -173,17 +177,28 @@ void __init xen_swiotlb_init(int verbose)
         rc = xen_swiotlb_fixup(xen_io_tlb_start,
                                bytes,
                                xen_io_tlb_nslabs);
-        if (rc)
+        if (rc) {
+                free_bootmem(__pa(xen_io_tlb_start), bytes);
+                m = "Failed to get contiguous memory for DMA from Xen!\n"\
+                    "You either: don't have the permissions, do not have"\
+                    " enough free memory under 4GB, or the hypervisor memory"\
+                    " is too fragmented!";
                 goto error;
-
+        }
         start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
         swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);

         return;
 error:
-        panic("DMA(%d): Failed to exchange pages allocated for DMA with Xen! "\
-              "We either don't have the permission or you do not have enough"\
-              "free memory under 4GB!\n", rc);
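+        /*
+         * Could not set up a suitably sized SWIOTLB: halve the slab count
+         * (but keep at least 2MB worth) and retry, up to three times,
+         * before giving up and panicking.
+         */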
+        if (repeat--) {
+                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
+                                        (xen_io_tlb_nslabs >> 1));
+                printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
+                       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
+                goto retry;
+        }
+        xen_raw_printk("%s (rc:%d)", m, rc);
+        panic("%s (rc:%d)", m, rc);
 }

 void *
@@ -194,6 +209,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         int order = get_order(size);
         u64 dma_mask = DMA_BIT_MASK(32);
         unsigned long vstart;
+        phys_addr_t phys;
+        dma_addr_t dev_addr;

         /*
          * Ignore region specifiers - the kernel's ideas of
@@ -209,18 +226,26 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         vstart = __get_free_pages(flags, order);
         ret = (void *)vstart;

+        if (!ret)
+                return ret;
+
         if (hwdev && hwdev->coherent_dma_mask)
-                dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+                dma_mask = hwdev->coherent_dma_mask;

-        if (ret) {
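+        /*
+         * Use the buffer as-is when it already lies below the device's
+         * coherent DMA mask and does not span non-contiguous machine frames;
+         * only otherwise exchange it for a machine-contiguous region.
+         */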
+        phys = virt_to_phys(ret);
+        dev_addr = xen_phys_to_bus(phys);
+        if (((dev_addr + size - 1 <= dma_mask)) &&
+            !range_straddles_page_boundary(phys, size))
+                *dma_handle = dev_addr;
+        else {
                 if (xen_create_contiguous_region(vstart, order,
                                                  fls64(dma_mask)) != 0) {
                         free_pages(vstart, order);
                         return NULL;
                 }
-                memset(ret, 0, size);
                 *dma_handle = virt_to_machine(ret).maddr;
         }
+        memset(ret, 0, size);
         return ret;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
@@ -230,11 +255,21 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                           dma_addr_t dev_addr)
 {
         int order = get_order(size);
+        phys_addr_t phys;
+        u64 dma_mask = DMA_BIT_MASK(32);

         if (dma_release_from_coherent(hwdev, order, vaddr))
                 return;

-        xen_destroy_contiguous_region((unsigned long)vaddr, order);
+        if (hwdev && hwdev->coherent_dma_mask)
+                dma_mask = hwdev->coherent_dma_mask;
+
+        phys = virt_to_phys(vaddr);
+
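+        /*
+         * Mirror the check in xen_swiotlb_alloc_coherent(): only buffers
+         * that were exchanged for a contiguous region there need to be
+         * handed back via xen_destroy_contiguous_region().
+         */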
+        if (((dev_addr + size - 1 > dma_mask)) ||
+            range_straddles_page_boundary(phys, size))
+                xen_destroy_contiguous_region((unsigned long)vaddr, order);
+
         free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
@@ -278,9 +313,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         /*
          * Ensure that the address returned is DMA'ble
          */
-        if (!dma_capable(dev, dev_addr, size))
-                panic("map_single: bounce buffer is not DMA'ble");
-
+        if (!dma_capable(dev, dev_addr, size)) {
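+                /* Undo the bounce mapping and return 0 rather than panicking. */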
+                swiotlb_tbl_unmap_single(dev, map, size, dir);
+                dev_addr = 0;
+        }
         return dev_addr;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);