@@ -344,7 +344,7 @@ static int iommu_map(struct protection_domain *dom,
 	u64 __pte, *pte, *page;
 
 	bus_addr  = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);
 
 	/* only support 512GB address spaces for now */
 	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -600,7 +600,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
 			continue;
 
 		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++i) {
+		for (j = 0; j < 512; ++j) {
 			if (!IOMMU_PTE_PRESENT(p2[j]))
 				continue;
 			p3 = IOMMU_PTE_PAGE(p2[j]);
@@ -910,7 +910,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 	if (address >= dom->aperture_size)
 		return;
 
-	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
 
 	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
@@ -922,8 +922,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 
 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
@@ -983,7 +983,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+	if ((dma_addr == bad_dma_address) ||
+	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);