@@ -365,113 +365,14 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-#define SG_ENT_PHYS_ADDRESS(SG)	(__pa(sg_virt((SG))))
-
-static long fill_sg(long entry, struct device *dev,
-		    struct scatterlist *sg,
-		    int nused, int nelems, unsigned long prot)
-{
-	struct scatterlist *dma_sg = sg;
-	unsigned long flags;
-	int i;
-
-	local_irq_save(flags);
-
-	iommu_batch_start(dev, prot, entry);
-
-	for (i = 0; i < nused; i++) {
-		unsigned long pteval = ~0UL;
-		u32 dma_npages;
-
-		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
-			      dma_sg->dma_length +
-			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
-		do {
-			unsigned long offset;
-			signed int len;
-
-			/* If we are here, we know we have at least one
-			 * more page to map.  So walk forward until we
-			 * hit a page crossing, and begin creating new
-			 * mappings from that spot.
-			 */
-			for (;;) {
-				unsigned long tmp;
-
-				tmp = SG_ENT_PHYS_ADDRESS(sg);
-				len = sg->length;
-				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
-					pteval = tmp & IO_PAGE_MASK;
-					offset = tmp & (IO_PAGE_SIZE - 1UL);
-					break;
-				}
-				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
-					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
-					offset = 0UL;
-					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
-					break;
-				}
-				sg = sg_next(sg);
-				nelems--;
-			}
-
-			pteval = (pteval & IOPTE_PAGE);
-			while (len > 0) {
-				long err;
-
-				err = iommu_batch_add(pteval);
-				if (unlikely(err < 0L))
-					goto iommu_map_failed;
-
-				pteval += IO_PAGE_SIZE;
-				len -= (IO_PAGE_SIZE - offset);
-				offset = 0;
-				dma_npages--;
-			}
-
-			pteval = (pteval & IOPTE_PAGE) + len;
-			sg = sg_next(sg);
-			nelems--;
-
-			/* Skip over any tail mappings we've fully mapped,
-			 * adjusting pteval along the way.  Stop when we
-			 * detect a page crossing event.
-			 */
-			while (nelems &&
-			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
-			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
-			       ((pteval ^
-				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
-				pteval += sg->length;
-				sg = sg_next(sg);
-				nelems--;
-			}
-			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
-				pteval = ~0UL;
-		} while (dma_npages != 0);
-		dma_sg = sg_next(dma_sg);
-	}
-
-	if (unlikely(iommu_batch_end() < 0L))
-		goto iommu_map_failed;
-
-	local_irq_restore(flags);
-	return 0;
-
-iommu_map_failed:
-	local_irq_restore(flags);
-	return -1L;
-}
-
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			 int nelems, enum dma_data_direction direction)
 {
+	unsigned long flags, npages, i, prot;
+	struct scatterlist *sg;
 	struct iommu *iommu;
-	unsigned long flags, npages, prot;
-	u32 dma_base;
-	struct scatterlist *sgtmp;
 	long entry, err;
-	int used;
+	u32 dma_base;
 
 	/* Fast path single entry scatterlists. */
 	if (nelems == 1) {
@@ -489,10 +390,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	if (unlikely(direction == DMA_NONE))
 		goto bad;
 
-	/* Step 1: Prepare scatter list. */
-	npages = prepare_sg(dev, sglist, nelems);
+	npages = calc_npages(sglist, nelems);
 
-	/* Step 2: Allocate a cluster and context, if necessary. */
 	spin_lock_irqsave(&iommu->lock, flags);
 	entry = arena_alloc(&iommu->arena, npages);
 	spin_unlock_irqrestore(&iommu->lock, flags);
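
calc_npages() and iommu_num_pages() are not defined in the hunks shown here.
A minimal sketch consistent with their call sites in this patch (per-entry
page counts at IO_PAGE_SIZE granularity, summed over the whole list); treat
the exact bodies as assumptions rather than a quote of the patch:

static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	/* Pages spanned by [vaddr, vaddr + slen) at IO_PAGE_SIZE
	 * granularity: round the end up, the start down, then convert
	 * the byte span to a page count.
	 */
	unsigned long npages;

	npages = IO_PAGE_ALIGN(vaddr + slen) - (vaddr & IO_PAGE_MASK);
	return npages >> IO_PAGE_SHIFT;
}

static unsigned long calc_npages(struct scatterlist *sglist, int nelems)
{
	unsigned long i, npages = 0;
	struct scatterlist *sg;

	/* Sum the IOMMU pages each entry spans; with merging gone, this
	 * is exactly how many arena entries the map path consumes.
	 */
	for_each_sg(sglist, sg, nelems, i)
		npages += iommu_num_pages(SG_ENT_PHYS_ADDRESS(sg), sg->length);

	return npages;
}
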
@@ -503,27 +402,45 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	dma_base = iommu->page_table_map_base +
 		(entry << IO_PAGE_SHIFT);
 
-	/* Step 3: Normalize DMA addresses. */
-	used = nelems;
-
-	sgtmp = sglist;
-	while (used && sgtmp->dma_length) {
-		sgtmp->dma_address += dma_base;
-		sgtmp = sg_next(sgtmp);
-		used--;
-	}
-	used = nelems - used;
-
-	/* Step 4: Create the mappings. */
 	prot = HV_PCI_MAP_ATTR_READ;
 	if (direction != DMA_TO_DEVICE)
 		prot |= HV_PCI_MAP_ATTR_WRITE;
 
-	err = fill_sg(entry, dev, sglist, used, nelems, prot);
+	local_irq_save(flags);
+
+	iommu_batch_start(dev, prot, entry);
+
+	for_each_sg(sglist, sg, nelems, i) {
+		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
+		unsigned long slen = sg->length;
+		unsigned long this_npages;
+
+		this_npages = iommu_num_pages(paddr, slen);
+
+		sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
+		sg->dma_length = slen;
+
+		paddr &= IO_PAGE_MASK;
+		while (this_npages--) {
+			err = iommu_batch_add(paddr);
+			if (unlikely(err < 0L)) {
+				local_irq_restore(flags);
+				goto iommu_map_failed;
+			}
+
+			paddr += IO_PAGE_SIZE;
+			dma_base += IO_PAGE_SIZE;
+		}
+	}
+
+	err = iommu_batch_end();
+
+	local_irq_restore(flags);
+
 	if (unlikely(err < 0L))
 		goto iommu_map_failed;
 
-	return used;
+	return nelems;
 
 bad:
 	if (printk_ratelimit())
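
The iommu_batch_* helpers the new mapping loop calls are defined near the top
of pci_sun4v.c and are untouched by this patch. A condensed sketch of their
contract, with error reporting and bounds checks omitted; the per-cpu state,
the PGLIST_NENTS sizing, and the pci_sun4v_iommu_map() flush follow the
surrounding file, but treat the details here as a sketch rather than a quote:

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* device the mappings are for	*/
	unsigned long	prot;		/* HV_PCI_MAP_ATTR_* bits	*/
	unsigned long	entry;		/* next IOTSB index to fill	*/
	u64		*pglist;	/* queued physical page addrs	*/
	unsigned long	npages;		/* how many are queued		*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

/* Interrupts must be disabled by the caller, which is why dma_4v_map_sg()
 * brackets the batch with local_irq_save()/local_irq_restore().
 */
static inline void iommu_batch_start(struct device *dev, unsigned long prot,
				     unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev    = dev;
	p->prot   = prot;
	p->entry  = entry;
	p->npages = 0;
}

/* Push the queued pages into the IOTSB via the hypervisor. */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long entry = p->entry, npages = p->npages;
	u64 *pglist = p->pglist;

	while (npages != 0) {
		long num = pci_sun4v_iommu_map(pbm->devhandle,
					       HV_PCI_TSBID(0, entry),
					       npages, p->prot, __pa(pglist));
		if (unlikely(num < 0))
			return -1;

		entry  += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;
	return 0;
}

/* Queue one IO_PAGE_SIZE page, flushing when the page list fills. */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);
	return 0;
}

/* Flush whatever is still queued; negative on failure. */
static inline long iommu_batch_end(void)
{
	return iommu_batch_flush(&__get_cpu_var(iommu_batch));
}
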
@@ -541,12 +458,11 @@ iommu_map_failed:
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			    int nelems, enum dma_data_direction direction)
 {
+	unsigned long flags, npages;
 	struct pci_pbm_info *pbm;
+	u32 devhandle, bus_addr;
 	struct iommu *iommu;
-	unsigned long flags, i, npages;
-	struct scatterlist *sg, *sgprv;
 	long entry;
-	u32 devhandle, bus_addr;
 
 	if (unlikely(direction == DMA_NONE)) {
 		if (printk_ratelimit())
@@ -558,16 +474,8 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	devhandle = pbm->devhandle;
 
 	bus_addr = sglist->dma_address & IO_PAGE_MASK;
-	sgprv = NULL;
-	for_each_sg(sglist, sg, nelems, i) {
-		if (sg->dma_length == 0)
-			break;
-
-		sgprv = sg;
-	}
-
-	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
-		  bus_addr) >> IO_PAGE_SHIFT;
+	npages = calc_npages(sglist, nelems);
 
 	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
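
A worked example (hypothetical values; IO_PAGE_SHIFT is 13 on sparc64, so
IO_PAGE_SIZE is 8K) of why the summed per-entry count from calc_npages()
matches what the old sgprv scan computed, now that the map path lays entries
out back to back with no merging:

/* Hypothetical two-entry list as laid out by the new dma_4v_map_sg():
 *
 *   entry 0: dma_address = dma_base + 0x0000, dma_length = 0x2000 (1 page)
 *   entry 1: dma_address = dma_base + 0x2000, dma_length = 0x2000 (1 page)
 *
 * Old computation: walk to the last entry with a nonzero dma_length
 * (sgprv = entry 1), then
 *   npages = (IO_PAGE_ALIGN(dma_base + 0x2000 + 0x2000) - dma_base)
 *            >> IO_PAGE_SHIFT = 2.
 * New computation: calc_npages() sums per-entry page counts, 1 + 1 = 2.
 *
 * The totals agree because each entry now occupies exactly the
 * iommu_num_pages() range the map path allocated for it.
 */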