|
@@ -127,17 +127,23 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
|
|
|
* reliably as devices without an INTx disable bit will then generate a
|
|
|
* level IRQ which will never be cleared.
|
|
|
*/
|
|
|
-static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
|
|
|
+static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
|
|
|
{
|
|
|
u32 mask_bits = desc->masked;
|
|
|
|
|
|
if (!desc->msi_attrib.maskbit)
|
|
|
- return;
|
|
|
+ return 0;
|
|
|
|
|
|
mask_bits &= ~mask;
|
|
|
mask_bits |= flag;
|
|
|
pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
|
|
|
- desc->masked = mask_bits;
|
|
|
+
|
|
|
+ return mask_bits;
|
|
|
+}
|
|
|
+
|
|
|
+static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
|
|
|
+{
|
|
|
+ desc->masked = __msi_mask_irq(desc, mask, flag);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -147,15 +153,21 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
|
|
|
* file. This saves a few milliseconds when initialising devices with lots
|
|
|
* of MSI-X interrupts.
|
|
|
*/
|
|
|
-static void msix_mask_irq(struct msi_desc *desc, u32 flag)
|
|
|
+static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
|
|
|
{
|
|
|
u32 mask_bits = desc->masked;
|
|
|
unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
|
|
|
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
|
|
|
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
|
|
|
mask_bits &= ~1;
|
|
|
mask_bits |= flag;
|
|
|
writel(mask_bits, desc->mask_base + offset);
|
|
|
- desc->masked = mask_bits;
|
|
|
+
|
|
|
+ return mask_bits;
|
|
|
+}
|
|
|
+
|
|
|
+static void msix_mask_irq(struct msi_desc *desc, u32 flag)
|
|
|
+{
|
|
|
+ desc->masked = __msix_mask_irq(desc, flag);
|
|
|
}
|
|
|
|
|
|
static void msi_set_mask_bit(unsigned irq, u32 flag)
|
|
@@ -188,9 +200,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
|
|
|
void __iomem *base = entry->mask_base +
|
|
|
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
|
|
|
|
|
|
- msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
|
|
|
- msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
|
|
|
- msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
|
|
|
+ msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
|
|
|
+ msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
|
|
|
+ msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
|
|
|
} else {
|
|
|
struct pci_dev *dev = entry->dev;
|
|
|
int pos = entry->msi_attrib.pos;
|
|
@@ -225,11 +237,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
|
|
|
base = entry->mask_base +
|
|
|
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
|
|
|
|
|
|
- writel(msg->address_lo,
|
|
|
- base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
|
|
|
- writel(msg->address_hi,
|
|
|
- base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
|
|
|
- writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
|
|
|
+ writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
|
|
|
+ writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
|
|
|
+ writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
|
|
|
} else {
|
|
|
struct pci_dev *dev = entry->dev;
|
|
|
int pos = entry->msi_attrib.pos;
|
|
@@ -385,6 +395,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
|
|
|
/* Configure MSI capability structure */
|
|
|
ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
|
|
|
if (ret) {
|
|
|
+ msi_mask_irq(entry, mask, ~mask);
|
|
|
msi_free_irqs(dev);
|
|
|
return ret;
|
|
|
}
|
|
@@ -439,8 +450,14 @@ static int msix_capability_init(struct pci_dev *dev,
|
|
|
|
|
|
for (i = 0; i < nvec; i++) {
|
|
|
entry = alloc_msi_entry(dev);
|
|
|
- if (!entry)
|
|
|
- break;
|
|
|
+ if (!entry) {
|
|
|
+ if (!i)
|
|
|
+ iounmap(base);
|
|
|
+ else
|
|
|
+ msi_free_irqs(dev);
|
|
|
+ /* Not enough memory. Don't try again */
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
|
|
|
j = entries[i].entry;
|
|
|
entry->msi_attrib.is_msix = 1;
|
|
@@ -487,7 +504,7 @@ static int msix_capability_init(struct pci_dev *dev,
|
|
|
set_irq_msi(entry->irq, entry);
|
|
|
j = entries[i].entry;
|
|
|
entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
|
|
|
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
|
|
|
+ PCI_MSIX_ENTRY_VECTOR_CTRL);
|
|
|
msix_mask_irq(entry, 1);
|
|
|
i++;
|
|
|
}
|
|
@@ -611,9 +628,11 @@ void pci_msi_shutdown(struct pci_dev *dev)
|
|
|
pci_intx_for_msi(dev, 1);
|
|
|
dev->msi_enabled = 0;
|
|
|
|
|
|
+ /* Return the device with MSI unmasked to its initial state */
|
|
|
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
|
|
|
mask = msi_capable_mask(ctrl);
|
|
|
- msi_mask_irq(desc, mask, ~mask);
|
|
|
+ /* Keep cached state to be restored */
|
|
|
+ __msi_mask_irq(desc, mask, ~mask);
|
|
|
|
|
|
/* Restore dev->irq to its default pin-assertion irq */
|
|
|
dev->irq = desc->msi_attrib.default_irq;
|
|
@@ -653,7 +672,6 @@ static int msi_free_irqs(struct pci_dev* dev)
|
|
|
|
|
|
list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
|
|
|
if (entry->msi_attrib.is_msix) {
|
|
|
- msix_mask_irq(entry, 1);
|
|
|
if (list_is_last(&entry->list, &dev->msi_list))
|
|
|
iounmap(entry->mask_base);
|
|
|
}
|
|
@@ -741,9 +759,17 @@ static void msix_free_all_irqs(struct pci_dev *dev)
|
|
|
|
|
|
void pci_msix_shutdown(struct pci_dev* dev)
|
|
|
{
|
|
|
+ struct msi_desc *entry;
|
|
|
+
|
|
|
if (!pci_msi_enable || !dev || !dev->msix_enabled)
|
|
|
return;
|
|
|
|
|
|
+ /* Return the device with MSI-X masked to its initial state */
|
|
|
+ list_for_each_entry(entry, &dev->msi_list, list) {
|
|
|
+ /* Keep cached states to be restored */
|
|
|
+ __msix_mask_irq(entry, 1);
|
|
|
+ }
|
|
|
+
|
|
|
msix_set_enable(dev, 0);
|
|
|
pci_intx_for_msi(dev, 1);
|
|
|
dev->msix_enabled = 0;
|