|
@@ -3561,54 +3561,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
|
|
int handled = 0;
|
|
int handled = 0;
|
|
int status;
|
|
int status;
|
|
|
|
|
|
|
|
+ /* loop handling interrupts until we have no new ones or
|
|
|
|
+ * we hit an invalid/hotplug case.
|
|
|
|
+ */
|
|
status = RTL_R16(IntrStatus);
|
|
status = RTL_R16(IntrStatus);
|
|
|
|
+ while (status && status != 0xffff) {
|
|
|
|
+ handled = 1;
|
|
|
|
|
|
- /* hotplug/major error/no more work/shared irq */
|
|
|
|
- if ((status == 0xffff) || !status)
|
|
|
|
- goto out;
|
|
|
|
-
|
|
|
|
- handled = 1;
|
|
|
|
|
|
+ /* Handle all of the error cases first. These will reset
|
|
|
|
+ * the chip, so just exit the loop.
|
|
|
|
+ */
|
|
|
|
+ if (unlikely(!netif_running(dev))) {
|
|
|
|
+ rtl8169_asic_down(ioaddr);
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
|
|
- if (unlikely(!netif_running(dev))) {
|
|
|
|
- rtl8169_asic_down(ioaddr);
|
|
|
|
- goto out;
|
|
|
|
- }
|
|
|
|
|
|
+ /* Work around for rx fifo overflow */
|
|
|
|
+ if (unlikely(status & RxFIFOOver) &&
|
|
|
|
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
|
|
|
|
+ netif_stop_queue(dev);
|
|
|
|
+ rtl8169_tx_timeout(dev);
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
|
|
- status &= tp->intr_mask;
|
|
|
|
- RTL_W16(IntrStatus,
|
|
|
|
- (status & RxFIFOOver) ? (status | RxOverflow) : status);
|
|
|
|
|
|
+ if (unlikely(status & SYSErr)) {
|
|
|
|
+ rtl8169_pcierr_interrupt(dev);
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
|
|
- if (!(status & tp->intr_event))
|
|
|
|
- goto out;
|
|
|
|
|
|
+ if (status & LinkChg)
|
|
|
|
+ rtl8169_check_link_status(dev, tp, ioaddr);
|
|
|
|
|
|
- /* Work around for rx fifo overflow */
|
|
|
|
- if (unlikely(status & RxFIFOOver) &&
|
|
|
|
- (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
|
|
|
|
- netif_stop_queue(dev);
|
|
|
|
- rtl8169_tx_timeout(dev);
|
|
|
|
- goto out;
|
|
|
|
- }
|
|
|
|
|
|
+ /* We need to see the latest version of tp->intr_mask to
|
|
|
|
+ * avoid ignoring an MSI interrupt and having to wait for
|
|
|
|
+ * another event which may never come.
|
|
|
|
+ */
|
|
|
|
+ smp_rmb();
|
|
|
|
+ if (status & tp->intr_mask & tp->napi_event) {
|
|
|
|
+ RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
|
|
|
|
+ tp->intr_mask = ~tp->napi_event;
|
|
|
|
+
|
|
|
|
+ if (likely(napi_schedule_prep(&tp->napi)))
|
|
|
|
+ __napi_schedule(&tp->napi);
|
|
|
|
+ else if (netif_msg_intr(tp)) {
|
|
|
|
+ printk(KERN_INFO "%s: interrupt %04x in poll\n",
|
|
|
|
+ dev->name, status);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
|
|
- if (unlikely(status & SYSErr)) {
|
|
|
|
- rtl8169_pcierr_interrupt(dev);
|
|
|
|
- goto out;
|
|
|
|
|
|
+ /* We only get a new MSI interrupt when all active irq
|
|
|
|
+ * sources on the chip have been acknowledged. So, ack
|
|
|
|
+ * everything we've seen and check if new sources have become
|
|
|
|
+ * active to avoid blocking all interrupts from the chip.
|
|
|
|
+ */
|
|
|
|
+ RTL_W16(IntrStatus,
|
|
|
|
+ (status & RxFIFOOver) ? (status | RxOverflow) : status);
|
|
|
|
+ status = RTL_R16(IntrStatus);
|
|
}
|
|
}
|
|
|
|
|
|
- if (status & LinkChg)
|
|
|
|
- rtl8169_check_link_status(dev, tp, ioaddr);
|
|
|
|
-
|
|
|
|
- if (status & tp->napi_event) {
|
|
|
|
- RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
|
|
|
|
- tp->intr_mask = ~tp->napi_event;
|
|
|
|
-
|
|
|
|
- if (likely(napi_schedule_prep(&tp->napi)))
|
|
|
|
- __napi_schedule(&tp->napi);
|
|
|
|
- else if (netif_msg_intr(tp)) {
|
|
|
|
- printk(KERN_INFO "%s: interrupt %04x in poll\n",
|
|
|
|
- dev->name, status);
|
|
|
|
- }
|
|
|
|
- }
|
|
|
|
-out:
|
|
|
|
return IRQ_RETVAL(handled);
|
|
return IRQ_RETVAL(handled);
|
|
}
|
|
}
|
|
|
|
|
|
@@ -3624,13 +3634,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
|
|
|
|
|
|
if (work_done < budget) {
|
|
if (work_done < budget) {
|
|
napi_complete(napi);
|
|
napi_complete(napi);
|
|
- tp->intr_mask = 0xffff;
|
|
|
|
- /*
|
|
|
|
- * 20040426: the barrier is not strictly required but the
|
|
|
|
- * behavior of the irq handler could be less predictable
|
|
|
|
- * without it. Btw, the lack of flush for the posted pci
|
|
|
|
- * write is safe - FR
|
|
|
|
|
|
+
|
|
|
|
+ /* We need to force the visibility of tp->intr_mask
|
|
|
|
+ * for other CPUs, as we can lose an MSI interrupt
|
|
|
|
+ * and potentially wait for a retransmit timeout if we don't.
|
|
|
|
+ * The posted write to IntrMask is safe, as it will
|
|
|
|
+ * eventually make it to the chip and we won't lose anything
|
|
|
|
+ * until it does.
|
|
*/
|
|
*/
|
|
|
|
+ tp->intr_mask = 0xffff;
|
|
smp_wmb();
|
|
smp_wmb();
|
|
RTL_W16(IntrMask, tp->intr_event);
|
|
RTL_W16(IntrMask, tp->intr_event);
|
|
}
|
|
}
|