@@ -25,6 +25,7 @@
 #include <linux/bitops.h>
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -82,17 +83,24 @@ AGPEXTERN __u32 *agp_gatt_table;
 static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
 static int need_flush;		/* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(int size)
+static unsigned long alloc_iommu(struct device *dev, int size)
 {
 	unsigned long offset, flags;
+	unsigned long boundary_size;
+	unsigned long base_index;
+
+	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
+			   PAGE_SIZE) >> PAGE_SHIFT;
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
-				       iommu_pages, size);
+	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
+				  size, base_index, boundary_size, 0);
 	if (offset == -1) {
 		need_flush = 1;
-		offset = find_next_zero_string(iommu_gart_bitmap, 0,
-					       iommu_pages, size);
+		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
+					  size, base_index, boundary_size, 0);
 	}
 	if (offset != -1) {
 		set_bit_string(iommu_gart_bitmap, offset, size);
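
The base_index/boundary_size computation above is the heart of the conversion:
iommu_area_alloc() wants the device's segment boundary expressed in GART pages.
A quick userspace sketch of that arithmetic, assuming the common 4 GiB default
boundary mask (0xffffffff), 4 KiB pages, a made-up aperture base of 0xa0000000,
and a 64-bit unsigned long; none of these constants are taken from the patch:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		/* Assumed values, for illustration only. */
		unsigned long seg_boundary = 0xffffffffUL;	/* default 4 GiB - 1 mask */
		unsigned long iommu_bus_base = 0xa0000000UL;	/* hypothetical aperture base */

		/* Same shape as the patch: page index of the aperture base
		 * within its segment, and the segment size in pages. */
		unsigned long base_index = ALIGN(iommu_bus_base & seg_boundary,
						 PAGE_SIZE) >> PAGE_SHIFT;
		unsigned long boundary_size = ALIGN(seg_boundary + 1,
						    PAGE_SIZE) >> PAGE_SHIFT;

		printf("base_index    = %#lx\n", base_index);	/* 0xa0000  */
		printf("boundary_size = %#lx\n", boundary_size);/* 0x100000 */
		return 0;
	}

With the default mask, boundary_size comes out to 0x100000 pages, i.e. an
allocation may never straddle a 4 GiB line in bus address space.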
@@ -114,7 +122,7 @@ static void free_iommu(unsigned long offset, int size)
 	unsigned long flags;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	__clear_bit_string(iommu_gart_bitmap, offset, size);
+	iommu_area_free(iommu_gart_bitmap, offset, size);
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
@@ -235,7 +243,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 				size_t size, int dir)
 {
 	unsigned long npages = to_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(npages);
+	unsigned long iommu_page = alloc_iommu(dev, npages);
 	int i;
 
 	if (iommu_page == -1) {
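
to_pages() above turns a physical address plus length into the number of GART
pages the mapping occupies; the leading sub-page offset can push the count up
by one. A minimal standalone model of that computation (the helper name is the
file's; the macro values and function name here are assumptions):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	/* Model of to_pages(): pages spanned by `size` bytes starting at
	 * `addr`, counting the partial page implied by the offset. */
	static unsigned long pages_spanned(unsigned long addr, unsigned long size)
	{
		return ((addr & (PAGE_SIZE - 1)) + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}

	int main(void)
	{
		/* An 8192-byte buffer starting 16 bytes into a page spans 3 pages. */
		printf("%lu\n", pages_spanned(0x1010, 8192));	/* prints 3 */
		return 0;
	}

That page count is exactly what alloc_iommu(dev, npages) is then asked for.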
@@ -355,10 +363,11 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 }
 
 /* Map multiple scatterlist entries continuous into the first. */
-static int __dma_map_cont(struct scatterlist *start, int nelems,
-			  struct scatterlist *sout, unsigned long pages)
+static int __dma_map_cont(struct device *dev, struct scatterlist *start,
+			  int nelems, struct scatterlist *sout,
+			  unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(pages);
+	unsigned long iommu_start = alloc_iommu(dev, pages);
 	unsigned long iommu_page = iommu_start;
 	struct scatterlist *s;
 	int i;
@@ -394,8 +403,8 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
 }
 
 static inline int
-dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
-	     unsigned long pages, int need)
+dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
+	     struct scatterlist *sout, unsigned long pages, int need)
 {
 	if (!need) {
 		BUG_ON(nelems != 1);
@@ -403,7 +412,7 @@ dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
 		sout->dma_length = start->length;
 		return 0;
 	}
-	return __dma_map_cont(start, nelems, sout, pages);
+	return __dma_map_cont(dev, start, nelems, sout, pages);
 }
 
 /*
@@ -449,8 +458,8 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		if (!iommu_merge || !nextneed || !need || s->offset ||
 		    (s->length + seg_size > max_seg_size) ||
 		    (ps->offset + ps->length) % PAGE_SIZE) {
-			if (dma_map_cont(start_sg, i - start, sgmap,
-					 pages, need) < 0)
+			if (dma_map_cont(dev, start_sg, i - start,
+					 sgmap, pages, need) < 0)
 				goto error;
 			out++;
 			seg_size = 0;
@@ -466,7 +475,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		pages += to_pages(s->offset, s->length);
 		ps = s;
 	}
-	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
+	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
 		goto error;
 	out++;
 	flush_gart();
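
Why iommu_area_alloc() takes a boundary argument at all: some devices cannot
handle a DMA segment that crosses a power-of-two boundary (commonly 4 GiB),
and the old find_next_zero_string() had no way to express that. Below is a
deliberately simplified userspace model of a boundary-respecting bitmap
allocator; it is not lib/iommu-helper.c, only an illustration of the
constraint it enforces (the real helper additionally applies the base_index
shift and an alignment mask):

	#include <stdio.h>
	#include <string.h>

	#define MAP_PAGES 64

	static unsigned char map[MAP_PAGES];	/* 1 = page in use */

	/* Toy boundary-respecting allocator: find `size` free pages at or
	 * after `start` such that the run does not cross a multiple of
	 * `boundary`. Returns the first page, or -1 if nothing fits. */
	static long toy_area_alloc(unsigned long start, unsigned long size,
				   unsigned long boundary)
	{
		unsigned long off, i;

		for (off = start; off + size <= MAP_PAGES; off++) {
			/* Run would straddle a boundary multiple: skip to it. */
			if (off / boundary != (off + size - 1) / boundary) {
				off = (off / boundary + 1) * boundary - 1; /* ++ lands on it */
				continue;
			}
			for (i = 0; i < size && !map[off + i]; i++)
				;
			if (i == size) {
				memset(&map[off], 1, size);
				return (long)off;
			}
		}
		return -1;
	}

	/* Counterpart, in the spirit of iommu_area_free(). */
	static void toy_area_free(unsigned long off, unsigned long size)
	{
		memset(&map[off], 0, size);
	}

	int main(void)
	{
		long a;

		memset(map, 1, 14);	/* pages 0..13 busy */
		/* Four free pages that must not cross a 16-page boundary:
		 * 14..17 would straddle page 16, so we expect 16..19. */
		a = toy_area_alloc(0, 4, 16);
		printf("got pages %ld..%ld\n", a, a + 3);
		toy_area_free((unsigned long)a, 4);
		return 0;
	}

The GART patch feeds the same kind of constraint down from each device via
dma_get_seg_boundary(dev), which is why struct device *dev now has to be
threaded through dma_map_cont()/__dma_map_cont() into alloc_iommu().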