|
@@ -88,6 +88,12 @@ enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
|
|
|
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
|
|
|
u8 pci_cache_line_size;
|
|
|
|
|
|
+/*
|
|
|
+ * If we set up a device for bus mastering, we need to check the latency
|
|
|
+ * timer as certain BIOSes forget to set it properly.
|
|
|
+ */
|
|
|
+unsigned int pcibios_max_latency = 255;
|
|
|
+
|
|
|
/**
|
|
|
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
|
|
|
* @bus: pointer to PCI bus structure to search
|
|
@@ -959,6 +965,7 @@ void pci_restore_state(struct pci_dev *dev)
|
|
|
|
|
|
/* PCI Express register must be restored first */
|
|
|
pci_restore_pcie_state(dev);
|
|
|
+ pci_restore_ats_state(dev);
|
|
|
|
|
|
/*
|
|
|
* The Base Address register should be programmed before the command
|
|
@@ -967,7 +974,7 @@ void pci_restore_state(struct pci_dev *dev)
|
|
|
for (i = 15; i >= 0; i--) {
|
|
|
pci_read_config_dword(dev, i * 4, &val);
|
|
|
if (val != dev->saved_config_space[i]) {
|
|
|
- dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
|
|
|
+ dev_dbg(&dev->dev, "restoring config "
|
|
|
"space at offset %#x (was %#x, writing %#x)\n",
|
|
|
i, val, (int)dev->saved_config_space[i]);
|
|
|
pci_write_config_dword(dev,i * 4,
|
|
@@ -1536,8 +1543,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
|
|
|
}
|
|
|
|
|
|
out:
|
|
|
- dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
|
|
|
- enable ? "enabled" : "disabled");
|
|
|
+ dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -2595,6 +2601,33 @@ static void __pci_set_master(struct pci_dev *dev, bool enable)
|
|
|
dev->is_busmaster = enable;
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * pcibios_set_master - enable PCI bus-mastering for device dev
|
|
|
+ * @dev: the PCI device to enable
|
|
|
+ *
|
|
|
+ * Enables PCI bus-mastering for the device. This is the default
|
|
|
+ * implementation. Architecture specific implementations can override
|
|
|
+ * this if necessary.
|
|
|
+ */
|
|
|
+void __weak pcibios_set_master(struct pci_dev *dev)
|
|
|
+{
|
|
|
+ u8 lat;
|
|
|
+
|
|
|
+ /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
|
|
|
+ if (pci_is_pcie(dev))
|
|
|
+ return;
|
|
|
+
|
|
|
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
|
|
|
+ if (lat < 16)
|
|
|
+ lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
|
|
|
+ else if (lat > pcibios_max_latency)
|
|
|
+ lat = pcibios_max_latency;
|
|
|
+ else
|
|
|
+ return;
|
|
|
+ dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
|
|
|
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* pci_set_master - enables bus-mastering for device dev
|
|
|
* @dev: the PCI device to enable
|
|
@@ -2767,6 +2800,116 @@ pci_intx(struct pci_dev *pdev, int enable)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * pci_intx_mask_supported - probe for INTx masking support
|
|
|
+ * @dev: the PCI device to operate on
|
|
|
+ *
|
|
|
+ * Check if the device dev supports INTx masking via the config space
|
|
|
+ * command word.
|
|
|
+ */
|
|
|
+bool pci_intx_mask_supported(struct pci_dev *dev)
|
|
|
+{
|
|
|
+ bool mask_supported = false;
|
|
|
+ u16 orig, new;
|
|
|
+
|
|
|
+ pci_cfg_access_lock(dev);
|
|
|
+
|
|
|
+ pci_read_config_word(dev, PCI_COMMAND, &orig);
|
|
|
+ pci_write_config_word(dev, PCI_COMMAND,
|
|
|
+ orig ^ PCI_COMMAND_INTX_DISABLE);
|
|
|
+ pci_read_config_word(dev, PCI_COMMAND, &new);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * There's no way to protect against hardware bugs or detect them
|
|
|
+ * reliably, but as long as we know what the value should be, let's
|
|
|
+ * go ahead and check it.
|
|
|
+ */
|
|
|
+ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
|
|
|
+ dev_err(&dev->dev, "Command register changed from "
|
|
|
+ "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
|
|
|
+ } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
|
|
|
+ mask_supported = true;
|
|
|
+ pci_write_config_word(dev, PCI_COMMAND, orig);
|
|
|
+ }
|
|
|
+
|
|
|
+ pci_cfg_access_unlock(dev);
|
|
|
+ return mask_supported;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
|
|
|
+
|
|
|
+static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
|
|
|
+{
|
|
|
+ struct pci_bus *bus = dev->bus;
|
|
|
+ bool mask_updated = true;
|
|
|
+ u32 cmd_status_dword;
|
|
|
+ u16 origcmd, newcmd;
|
|
|
+ unsigned long flags;
|
|
|
+ bool irq_pending;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * We do a single dword read to retrieve both command and status.
|
|
|
+ * Document assumptions that make this possible.
|
|
|
+ */
|
|
|
+ BUILD_BUG_ON(PCI_COMMAND % 4);
|
|
|
+ BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
|
|
|
+
|
|
|
+ raw_spin_lock_irqsave(&pci_lock, flags);
|
|
|
+
|
|
|
+ bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
|
|
|
+
|
|
|
+ irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Check interrupt status register to see whether our device
|
|
|
+ * triggered the interrupt (when masking) or the next IRQ is
|
|
|
+ * already pending (when unmasking).
|
|
|
+ */
|
|
|
+ if (mask != irq_pending) {
|
|
|
+ mask_updated = false;
|
|
|
+ goto done;
|
|
|
+ }
|
|
|
+
|
|
|
+ origcmd = cmd_status_dword;
|
|
|
+ newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
|
|
|
+ if (mask)
|
|
|
+ newcmd |= PCI_COMMAND_INTX_DISABLE;
|
|
|
+ if (newcmd != origcmd)
|
|
|
+ bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
|
|
|
+
|
|
|
+done:
|
|
|
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
|
|
|
+
|
|
|
+ return mask_updated;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * pci_check_and_mask_intx - mask INTx on pending interrupt
|
|
|
+ * @dev: the PCI device to operate on
|
|
|
+ *
|
|
|
+ * Check if the device dev has its INTx line asserted, mask it and
|
|
|
+ * return true in that case. False is returned if no interrupt was
|
|
|
+ * pending.
|
|
|
+ */
|
|
|
+bool pci_check_and_mask_intx(struct pci_dev *dev)
|
|
|
+{
|
|
|
+ return pci_check_and_set_intx_mask(dev, true);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
|
|
|
+
|
|
|
+/**
|
|
|
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
|
|
|
+ * @dev: the PCI device to operate on
|
|
|
+ *
|
|
|
+ * Check if the device dev has its INTx line asserted, unmask it if not
|
|
|
+ * and return true. False is returned and the mask remains active if
|
|
|
+ * there was still an interrupt pending.
|
|
|
+ */
|
|
|
+bool pci_check_and_unmask_intx(struct pci_dev *dev)
|
|
|
+{
|
|
|
+ return pci_check_and_set_intx_mask(dev, false);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
|
|
|
+
|
|
|
/**
|
|
|
* pci_msi_off - disables any msi or msix capabilities
|
|
|
* @dev: the PCI device to operate on
|
|
@@ -2965,7 +3108,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
|
|
|
might_sleep();
|
|
|
|
|
|
if (!probe) {
|
|
|
- pci_block_user_cfg_access(dev);
|
|
|
+ pci_cfg_access_lock(dev);
|
|
|
/* block PM suspend, driver probe, etc. */
|
|
|
device_lock(&dev->dev);
|
|
|
}
|
|
@@ -2990,7 +3133,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
|
|
|
done:
|
|
|
if (!probe) {
|
|
|
device_unlock(&dev->dev);
|
|
|
- pci_unblock_user_cfg_access(dev);
|
|
|
+ pci_cfg_access_unlock(dev);
|
|
|
}
|
|
|
|
|
|
return rc;
|