@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);
 
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
 	ret = __iommu_queue_command(iommu, &cmd);
 
 	if (ret)
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
@@ -1034,8 +1033,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1063,8 +1061,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1130,8 +1127,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 			goto unmap;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1176,8 +1172,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1228,8 +1223,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1260,8 +1254,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 