@@ -37,6 +37,9 @@ struct command {
 	u32 data[4];
 };
 
+static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
+			     struct unity_map_entry *e);
+
 static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
 {
 	u32 tail, head;
@@ -144,3 +147,145 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 
 	return 0;
 }
+
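+/*
+ * Generic mapping function. It maps one 4kb page at phys_addr to the
+ * given bus_addr in the protection domain, allocating missing page
+ * table levels on demand.
+ */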
+static int iommu_map(struct protection_domain *dom,
+		     unsigned long bus_addr,
+		     unsigned long phys_addr,
+		     int prot)
+{
+	u64 __pte, *pte, *page;
+
+	bus_addr  = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);
+
+	/* only support 512GB address spaces for now */
+	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
+		return -EINVAL;
+
+	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];
+
+	if (!IOMMU_PTE_PRESENT(*pte)) {
+		page = (u64 *)get_zeroed_page(GFP_KERNEL);
+		if (!page)
+			return -ENOMEM;
+		*pte = IOMMU_L2_PDE(virt_to_phys(page));
+	}
+
+	pte = IOMMU_PTE_PAGE(*pte);
+	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
+
+	if (!IOMMU_PTE_PRESENT(*pte)) {
+		page = (u64 *)get_zeroed_page(GFP_KERNEL);
+		if (!page)
+			return -ENOMEM;
+		*pte = IOMMU_L1_PDE(virt_to_phys(page));
+	}
+
+	pte = IOMMU_PTE_PAGE(*pte);
+	pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];
+
+	if (IOMMU_PTE_PRESENT(*pte))
+		return -EBUSY;
+
+	__pte = phys_addr | IOMMU_PTE_P;
+	if (prot & IOMMU_PROT_IR)
+		__pte |= IOMMU_PTE_IR;
+	if (prot & IOMMU_PROT_IW)
+		__pte |= IOMMU_PTE_IW;
+
+	*pte = __pte;
+
+	return 0;
+}
+
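+/*
+ * This function checks if the given IOMMU handles at least one of
+ * the devices covered by a unity mapping entry.
+ */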
+static int iommu_for_unity_map(struct amd_iommu *iommu,
+			       struct unity_map_entry *entry)
+{
+	u16 bdf, i;
+
+	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
+		bdf = amd_iommu_alias_table[i];
+		if (amd_iommu_rlookup_table[bdf] == iommu)
+			return 1;
+	}
+
+	return 0;
+}
+
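+/*
+ * Initializes the unity mappings in the default dma_ops domain of an
+ * IOMMU for all unity map entries this IOMMU is responsible for.
+ */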
+static int iommu_init_unity_mappings(struct amd_iommu *iommu)
+{
+	struct unity_map_entry *entry;
+	int ret;
+
+	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+		if (!iommu_for_unity_map(iommu, entry))
+			continue;
+		ret = dma_ops_unity_map(iommu->default_dom, entry);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
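+/*
+ * Creates the identity mapping (bus address == physical address) for
+ * a unity map entry, page by page. Pages that fall into the aperture
+ * range are also marked as allocated in the aperture bitmap so the
+ * address allocator will not hand them out again.
+ */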
+static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
+			     struct unity_map_entry *e)
+{
+	u64 addr;
+	int ret;
+
+	for (addr = e->address_start; addr < e->address_end;
+	     addr += PAGE_SIZE) {
+		ret = iommu_map(&dma_dom->domain, addr, addr, e->prot);
+		if (ret)
+			return ret;
+		/*
+		 * if unity mapping is in aperture range mark the page
+		 * as allocated in the aperture
+		 */
+		if (addr < dma_dom->aperture_size)
+			__set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
+	}
+
+	return 0;
+}
+
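+/*
+ * Sets up all unity mappings that apply to a specific device in the
+ * given dma_ops domain.
+ */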
+static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
+					  u16 devid)
+{
+	struct unity_map_entry *e;
+	int ret;
+
+	list_for_each_entry(e, &amd_iommu_unity_map, list) {
+		if (!(devid >= e->devid_start && devid <= e->devid_end))
+			continue;
+		ret = dma_ops_unity_map(dma_dom, e);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
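
The three-level walk in iommu_map() above splits the IO virtual (bus) address into one 9-bit table index per level plus a 12-bit page offset, which is where the 512GB limit in the comment comes from (9 + 9 + 9 + 12 = 39 address bits). The IOMMU_PTE_L*_INDEX macros are defined elsewhere in the driver; the following is only a minimal sketch of how such index macros could look, assuming 512-entry tables and 4kb pages (the SKETCH_* names are illustrative, not the driver's):

	/* Sketch only: assumes 512 entries per table level and 4kb pages */
	#define SKETCH_L2_INDEX(addr) (((addr) >> 30) & 0x1ffULL)	/* bits 38..30 */
	#define SKETCH_L1_INDEX(addr) (((addr) >> 21) & 0x1ffULL)	/* bits 29..21 */
	#define SKETCH_L0_INDEX(addr) (((addr) >> 12) & 0x1ffULL)	/* bits 20..12 */

Under that layout a single root table covers 512 * 512 * 512 * 4kb = 512GB, which matches the IOMMU_MAP_SIZE_L3 bound checked at the top of iommu_map().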