@@ -96,6 +96,18 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 #endif
 }
 
+static u64 dma_direct_get_required_mask(struct device *dev)
+{
+	u64 end, mask;
+
+	end = memblock_end_of_DRAM() + get_dma_offset(dev);
+
+	mask = 1ULL << (fls64(end) - 1);
+	mask += mask - 1;
+
+	return mask;
+}
+
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     struct page *page,
 					     unsigned long offset,
@@ -144,6 +156,7 @@ struct dma_map_ops dma_direct_ops = {
 	.dma_supported	= dma_direct_dma_supported,
 	.map_page	= dma_direct_map_page,
 	.unmap_page	= dma_direct_unmap_page,
+	.get_required_mask	= dma_direct_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	.sync_single_for_cpu		= dma_direct_sync_single,
 	.sync_single_for_device		= dma_direct_sync_single,
@@ -173,7 +186,6 @@ EXPORT_SYMBOL(dma_set_mask);
 u64 dma_get_required_mask(struct device *dev)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-	u64 mask, end = 0;
 
 	if (ppc_md.dma_get_required_mask)
 		return ppc_md.dma_get_required_mask(dev);
@@ -181,31 +193,10 @@ u64 dma_get_required_mask(struct device *dev)
 	if (unlikely(dma_ops == NULL))
 		return 0;
 
-#ifdef CONFIG_PPC64
-	else if (dma_ops == &dma_iommu_ops)
-		return dma_iommu_get_required_mask(dev);
-#endif
-#ifdef CONFIG_SWIOTLB
-	else if (dma_ops == &swiotlb_dma_ops) {
-		u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
-
-		end = memblock_end_of_DRAM();
-		if (max_direct_dma_addr && end > max_direct_dma_addr)
-			end = max_direct_dma_addr;
-		end += get_dma_offset(dev);
-	}
-#endif
-	else if (dma_ops == &dma_direct_ops)
-		end = memblock_end_of_DRAM() + get_dma_offset(dev);
-	else {
-		WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
-		end = memblock_end_of_DRAM();
-	}
+	if (dma_ops->get_required_mask)
+		return dma_ops->get_required_mask(dev);
 
-	mask = 1ULL << (fls64(end) - 1);
-	mask += mask - 1;
-
-	return mask;
+	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
 