@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
+	u64 dma_mask = DMA_32BIT_MASK;
+
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr, size)) {
+	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
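For context, the patch switches both checks from address_needs_mapping(hwdev, ...) to is_buffer_dma_capable(dma_mask, ...), comparing against the device's coherent_dma_mask (falling back to DMA_32BIT_MASK when no device or mask is given) rather than the streaming dma_mask. As I recall from this kernel era, is_buffer_dma_capable() was a one-line range check in include/linux/dma-mapping.h; the standalone userspace sketch below mimics that check to illustrate the condition the patched code tests. The helper body and the example addresses are assumptions for illustration, not a quote of the kernel source.

	/* Illustrative userspace sketch of the range check behind
	 * is_buffer_dma_capable(). The body "addr + size <= mask" is an
	 * assumption based on the era's include/linux/dma-mapping.h.
	 */
	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define DMA_32BIT_MASK 0xffffffffULL	/* device limited to 32-bit DMA */

	static inline int is_buffer_dma_capable(uint64_t mask, uint64_t addr,
						size_t size)
	{
		return addr + size <= mask;
	}

	int main(void)
	{
		/* Buffer wholly below 4 GiB: reachable by a 32-bit-only
		 * device, so alloc_coherent can use the pages directly. */
		printf("%d\n", is_buffer_dma_capable(DMA_32BIT_MASK,
						     0x80000000ULL, 0x1000));
		/* Buffer crossing the 4 GiB boundary: not reachable; this is
		 * the case where the patched code falls back on the swiotlb
		 * bounce path. */
		printf("%d\n", is_buffer_dma_capable(DMA_32BIT_MASK,
						     0xffffe000ULL, 0x4000));
		return 0;
	}

Passing dma_mask explicitly is what lets the error printk report the coherent mask actually used for the check, instead of dereferencing *hwdev->dma_mask, which would be wrong for devices whose coherent and streaming masks differ (and unsafe when hwdev is NULL).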