@@ -590,6 +590,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 		ret = IRQ_HANDLED;
 		resume = RESUME_RETRY;
 	} else {
+		dev_err_ratelimited(smmu->dev,
+		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+		    iova, fsynr, root_cfg->cbndx);
 		ret = IRQ_NONE;
 		resume = RESUME_TERMINATE;
 	}
@@ -778,7 +781,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 #ifdef __BIG_ENDIAN
 	reg |= SCTLR_E;
 #endif
-	writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
 }

 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
@@ -1562,9 +1565,13 @@ static struct iommu_ops arm_smmu_ops = {
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 {
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-	void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
+	void __iomem *cb_base;
 	int i = 0;
-	u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+	u32 reg;
+
+	/* Clear Global FSR */
+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+	writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);

 	/* Mark all SMRn as invalid and all S2CRn as bypass */
 	for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1572,33 +1579,38 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
 	}

-	/* Make sure all context banks are disabled */
-	for (i = 0; i < smmu->num_context_banks; ++i)
-		writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));
+	/* Make sure all context banks are disabled and clear CB_FSR */
+	for (i = 0; i < smmu->num_context_banks; ++i) {
+		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
+	}

 	/* Invalidate the TLB, just in case */
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+
 	/* Enable fault reporting */
-	scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

 	/* Disable TLB broadcasting. */
-	scr0 |= (sCR0_VMIDPNE | sCR0_PTM);
+	reg |= (sCR0_VMIDPNE | sCR0_PTM);

 	/* Enable client access, but bypass when no mapping is found */
-	scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);

 	/* Disable forced broadcasting */
-	scr0 &= ~sCR0_FB;
+	reg &= ~sCR0_FB;

 	/* Don't upgrade barriers */
-	scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

 	/* Push the button */
 	arm_smmu_tlb_sync(smmu);
-	writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
+	writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
 }

 static int arm_smmu_id_size_to_bits(int size)
@@ -1703,13 +1715,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
 	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;

-	/* Check that we ioremapped enough */
+	/* Check for size mismatch of SMMU address space from mapped region */
 	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
 	size *= (smmu->pagesize << 1);
-	if (smmu->size < size)
-		dev_warn(smmu->dev,
-			 "device is 0x%lx bytes but only mapped 0x%lx!\n",
-			 size, smmu->size);
+	if (smmu->size != size)
+		dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs "
+			"from mapped region size (0x%lx)!\n", size, smmu->size);

 	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
 				     ID1_NUMS2CB_MASK;
@@ -1784,15 +1795,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	smmu->dev = dev;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "missing base address/size\n");
-		return -ENODEV;
-	}
-
+	smmu->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(smmu->base))
+		return PTR_ERR(smmu->base);
 	smmu->size = resource_size(res);
-	smmu->base = devm_request_and_ioremap(dev, res);
-	if (!smmu->base)
-		return -EADDRNOTAVAIL;

 	if (of_property_read_u32(dev->of_node, "#global-interrupts",
 				 &smmu->num_global_irqs)) {
@@ -1807,12 +1813,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		smmu->num_context_irqs++;
 	}

-	if (num_irqs < smmu->num_global_irqs) {
-		dev_warn(dev, "found %d interrupts but expected at least %d\n",
-			 num_irqs, smmu->num_global_irqs);
-		smmu->num_global_irqs = num_irqs;
+	if (!smmu->num_context_irqs) {
+		dev_err(dev, "found %d interrupts but expected at least %d\n",
+			num_irqs, smmu->num_global_irqs + 1);
+		return -ENODEV;
 	}
-	smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;

 	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
 				  GFP_KERNEL);
@@ -1936,7 +1941,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 		free_irq(smmu->irqs[i], smmu);

 	/* Turn the thing off */
-	writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+	writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
 	return 0;
 }

@@ -1984,7 +1989,7 @@ static void __exit arm_smmu_exit(void)
 	return platform_driver_unregister(&arm_smmu_driver);
 }

-module_init(arm_smmu_init);
+subsys_initcall(arm_smmu_init);
 module_exit(arm_smmu_exit);

 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");