@@ -295,7 +295,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 		u64 address, size_t size)
 {
 	int s = 0;
-	unsigned pages = iommu_nr_pages(address, size);
+	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
 
 	address &= PAGE_MASK;
 
@@ -679,8 +679,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	if (iommu->exclusion_start &&
 	    iommu->exclusion_start < dma_dom->aperture_size) {
 		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
-		int pages = iommu_nr_pages(iommu->exclusion_start,
-					   iommu->exclusion_length);
+		int pages = iommu_num_pages(iommu->exclusion_start,
+					    iommu->exclusion_length,
+					    PAGE_SIZE);
 		dma_ops_reserve_addresses(dma_dom, startpage, pages);
 	}
 
@@ -935,7 +936,7 @@ static dma_addr_t __map_single(struct device *dev,
 	unsigned long align_mask = 0;
 	int i;
 
-	pages = iommu_nr_pages(paddr, size);
+	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
 	paddr &= PAGE_MASK;
 
 	if (align)
@@ -980,7 +981,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
 		return;
 
-	pages = iommu_nr_pages(dma_addr, size);
+	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr &= PAGE_MASK;
 	start = dma_addr;
 
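
For reference, the generic helper these hunks switch to lives in lib/iommu-helper.c. A minimal sketch of what it computes, assuming the 2.6.28-era implementation (DIV_ROUND_UP is from <linux/kernel.h>):

unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
			      unsigned long io_page_size)
{
	/* Add the offset of addr within its IO page to the length,
	 * then round up to a whole number of IO pages. */
	unsigned long size = (addr & (io_page_size - 1)) + len;

	return DIV_ROUND_UP(size, io_page_size);
}

Unlike the driver-local iommu_nr_pages() being removed, the shared helper takes the IO page size as a third argument; every converted call site passes PAGE_SIZE, which keeps its behavior unchanged while letting IOMMUs with other IO page sizes reuse the same helper.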