@@ -31,6 +31,7 @@
 #include <linux/string.h>
 #include <linux/dma-mapping.h>
 #include <linux/bitops.h>
+#include <linux/iommu-helper.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -81,17 +82,19 @@ static int __init setup_iommu(char *str)
 __setup("protect4gb=", setup_protect4gb);
 __setup("iommu=", setup_iommu);
 
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+static unsigned long iommu_range_alloc(struct device *dev,
+				       struct iommu_table *tbl,
 				       unsigned long npages,
 				       unsigned long *handle,
 				       unsigned long mask,
 				       unsigned int align_order)
 {
-	unsigned long n, end, i, start;
+	unsigned long n, end, start;
 	unsigned long limit;
 	int largealloc = npages > 15;
 	int pass = 0;
 	unsigned long align_mask;
+	unsigned long boundary_size;
 
 	align_mask = 0xffffffffffffffffl >> (64 - align_order);
 
@@ -136,14 +139,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 		start &= mask;
 	}
 
-	n = find_next_zero_bit(tbl->it_map, limit, start);
-
-	/* Align allocation */
-	n = (n + align_mask) & ~align_mask;
-
-	end = n + npages;
+	if (dev)
+		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				      1 << IOMMU_PAGE_SHIFT);
+	else
+		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 
-	if (unlikely(end >= limit)) {
+	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
+			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
+			     align_mask);
+	if (n == -1) {
 		if (likely(pass < 2)) {
 			/* First failure, just rescan the half of the table.
 			 * Second failure, rescan the other half of the table.
@@ -158,14 +164,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 		}
 	}
 
-	for (i = n; i < end; i++)
-		if (test_bit(i, tbl->it_map)) {
-			start = i+1;
-			goto again;
-		}
-
-	for (i = n; i < end; i++)
-		__set_bit(i, tbl->it_map);
+	end = n + npages;
 
 	/* Bump the hint to a new block for small allocs. */
 	if (largealloc) {
@@ -184,16 +183,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 	return n;
 }
 
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
-		unsigned int npages, enum dma_data_direction direction,
-		unsigned long mask, unsigned int align_order)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+			      void *page, unsigned int npages,
+			      enum dma_data_direction direction,
+			      unsigned long mask, unsigned int align_order)
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
-	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
+	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 
 	if (unlikely(entry == DMA_ERROR_CODE)) {
 		spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -224,7 +224,6 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 			 unsigned int npages)
 {
 	unsigned long entry, free_entry;
-	unsigned long i;
 
 	entry = dma_addr >> IOMMU_PAGE_SHIFT;
 	free_entry = entry - tbl->it_offset;
@@ -246,9 +245,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	}
 
 	ppc_md.tce_free(tbl, entry, npages);
-
-	for (i = 0; i < npages; i++)
-		__clear_bit(free_entry+i, tbl->it_map);
+	iommu_area_free(tbl->it_map, free_entry, npages);
 }
 
 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -317,7 +314,7 @@ int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
 		    (vaddr & ~PAGE_MASK) == 0)
 			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
-		entry = iommu_range_alloc(tbl, npages, &handle,
+		entry = iommu_range_alloc(dev, tbl, npages, &handle,
 					  mask >> IOMMU_PAGE_SHIFT, align);
 
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -574,9 +571,9 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
  * need not be page aligned, the dma_addr_t returned will point to the same
  * byte within the page as vaddr.
  */
-dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-			    size_t size, unsigned long mask,
-			    enum dma_data_direction direction)
+dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+			    void *vaddr, size_t size, unsigned long mask,
+			    enum dma_data_direction direction)
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
@@ -593,7 +590,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 	    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
 		align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
 
-	dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+	dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 				 mask >> IOMMU_PAGE_SHIFT, align);
 	if (dma_handle == DMA_ERROR_CODE) {
 		if (printk_ratelimit()) {
@@ -625,8 +622,9 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
  */
-void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
+void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+			   size_t size, dma_addr_t *dma_handle,
+			   unsigned long mask, gfp_t flag, int node)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
@@ -660,7 +658,7 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 	/* Set up tces to cover the allocated range */
 	nio_pages = size >> IOMMU_PAGE_SHIFT;
 	io_order = get_iommu_order(size);
-	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 			      mask >> IOMMU_PAGE_SHIFT, io_order);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
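
As context for the hunks above: the open-coded find_next_zero_bit() scan in
iommu_range_alloc() is replaced by iommu_area_alloc() from the generic IOMMU
helper, which, besides honoring align_mask, rejects any candidate range that
would straddle a boundary window of boundary_size pages (derived from
dma_get_seg_boundary(dev) + 1 bytes when a device is supplied, 4GB otherwise),
and __iommu_free() now clears the bitmap through iommu_area_free(). Below is
a minimal standalone sketch of that span check, assuming boundary_size is a
power of two in IOMMU pages (segment boundaries are conventionally
power-of-two masks plus one); crosses_boundary() is an illustrative name for
this sketch, not the helper's actual internals.

#include <assert.h>

/*
 * Illustrative only: decide whether a candidate range of 'nr' IOMMU pages,
 * starting at bitmap index 'index' in a table whose first entry maps DMA
 * page 'shift' (tbl->it_offset in the patch), would cross a boundary of
 * 'boundary_size' pages. Assumes boundary_size is a power of two.
 */
static int crosses_boundary(unsigned long index, unsigned long nr,
			    unsigned long shift, unsigned long boundary_size)
{
	unsigned long offset = (shift + index) & (boundary_size - 1);

	return offset + nr > boundary_size;
}

int main(void)
{
	/* The patch's fallback case: a 4GB boundary expressed in 4KB
	 * IOMMU pages, assuming IOMMU_PAGE_SHIFT == 12. */
	unsigned long boundary = (1UL << 32) >> 12;

	/* A 16-page range well inside one boundary window is acceptable. */
	assert(!crosses_boundary(0, 16, 0, boundary));

	/* A range straddling the end of a window must be rejected. */
	assert(crosses_boundary(boundary - 8, 16, 0, boundary));

	return 0;
}

When such a range is rejected, trimming it is not enough; the search has to
restart past the boundary, which is why the boundary is fed into the bitmap
search itself rather than checked after iommu_range_alloc() returns.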