@@ -330,6 +330,14 @@ parse_dmar_table(void)
 	entry_header = (struct acpi_dmar_header *)(dmar + 1);
 	while (((unsigned long)entry_header) <
 			(((unsigned long)dmar) + dmar_tbl->length)) {
+		/* Avoid looping forever on bad ACPI tables */
+		if (entry_header->length == 0) {
+			printk(KERN_WARNING PREFIX
+				"Invalid 0-length structure\n");
+			ret = -EINVAL;
+			break;
+		}
+
 		dmar_table_print_dmar_entry(entry_header);
 
 		switch (entry_header->type) {
@@ -491,7 +499,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
-	int agaw;
+	int agaw = 0;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -507,6 +515,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+#ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
@@ -514,6 +523,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 			iommu->seq_id);
 		goto error;
 	}
+#endif
 	iommu->agaw = agaw;
 
 	/* the registers might be more than one page */
@@ -571,19 +581,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 	}
 }
 
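+/*
+ * Check whether the hardware has reported an Invalidation Queue Error
+ * (IQE) for the descriptor at @index; if so, recover the queue and
+ * return -EINVAL so the submitter can bail out.
+ */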
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+	u32 fault;
+	int head;
+	struct q_inval *qi = iommu->qi;
+	int wait_index = (index + 1) % QI_LENGTH;
+
+	fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/*
+	 * If IQE happens, the head points to the descriptor associated
+	 * with the error. No new descriptors are fetched until the IQE
+	 * is cleared.
+	 */
+	if (fault & DMA_FSTS_IQE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		if ((head >> 4) == index) {
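+			/*
+			 * Our descriptor faulted: overwrite it with a
+			 * fresh copy of the wait descriptor so the queue
+			 * can make progress, then acknowledge the error.
+			 */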
+			memcpy(&qi->desc[index], &qi->desc[wait_index],
+					sizeof(struct qi_desc));
+			__iommu_flush_cache(iommu, &qi->desc[index],
+					sizeof(struct qi_desc));
+			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+	int rc = 0;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
-		return;
+		return 0;
 
 	hw = qi->desc;
 
@@ -601,7 +641,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw[index] = *desc;
 
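+	/*
+	 * The wait descriptor tells the hardware to write QI_DONE to
+	 * desc_status[wait_index] once every descriptor ahead of it,
+	 * including the one just queued, has been executed.
+	 */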
-	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
 	hw[wait_index] = wait_desc;
@@ -612,13 +653,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock(&iommu->register_lock);
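+	/*
+	 * qi->q_lock is held here (see the unlock/relock in the wait
+	 * loop below) and already serializes all submitters, so the
+	 * tail-register update no longer needs register_lock.
+	 */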
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -628,15 +667,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
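+		/*
+		 * If our descriptor faulted, the wait descriptor behind
+		 * it never executes and desc_status[wait_index] never
+		 * becomes QI_DONE, so poll for faults on each pass
+		 * instead of spinning here forever.
+		 */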
+		rc = qi_check_fault(iommu, index);
+		if (rc)
+			goto out;
+
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-
-	qi->desc_status[index] = QI_DONE;
+out:
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
+
+	return rc;
 }
 
 /*
@@ -649,13 +694,13 @@ void qi_global_iec(struct intel_iommu *iommu)
 	desc.low = QI_IEC_TYPE;
 	desc.high = 0;
 
+	/* should never fail */
 	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		     u64 type, int non_present_entry_flush)
 {
-
 	struct qi_desc desc;
 
 	if (non_present_entry_flush) {
@@ -669,10 +714,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
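+	/* propagate any queued-invalidation error to the caller */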
+	return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -702,10 +744,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
-
+	return qi_submit_sync(&desc, iommu);
 }
 
 /*