@@ -361,25 +361,22 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 	}
 }
 
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+			     phys_addr_t phys, size_t size, int dir)
 {
 	unsigned long flags;
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
-	unsigned long start_dma_addr;
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
-	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	tbl_dma_addr &= mask;
+
+	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
@@ -467,6 +464,18 @@ found:
 	return dma_addr;
 }
 
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+{
+	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
+
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */