@@ -426,9 +426,30 @@ static void tg3_enable_ints(struct tg3 *tp)
 	tg3_cond_int(tp);
 }
 
+static inline unsigned int tg3_has_work(struct tg3 *tp)
+{
+	struct tg3_hw_status *sblk = tp->hw_status;
+	unsigned int work_exists = 0;
+
+	/* check for phy events */
+	if (!(tp->tg3_flags &
+	      (TG3_FLAG_USE_LINKCHG_REG |
+	       TG3_FLAG_POLL_SERDES))) {
+		if (sblk->status & SD_STATUS_LINK_CHG)
+			work_exists = 1;
+	}
+	/* check for RX/TX work to do */
+	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
+	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
+		work_exists = 1;
+
+	return work_exists;
+}
+
 /* tg3_restart_ints
- * similar to tg3_enable_ints, but it can return without flushing the
- * PIO write which reenables interrupts
+ * similar to tg3_enable_ints, but it accurately determines whether there
+ * is new work pending and can return without flushing the PIO write
+ * which reenables interrupts
  */
 static void tg3_restart_ints(struct tg3 *tp)
 {
@@ -437,7 +458,9 @@ static void tg3_restart_ints(struct tg3 *tp)
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
 	mmiowb();
 
-	tg3_cond_int(tp);
+	if (tg3_has_work(tp))
+		tw32(HOSTCC_MODE, tp->coalesce_mode |
+		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
 
 static inline void tg3_netif_stop(struct tg3 *tp)
@@ -2891,26 +2914,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	return (done ? 0 : 1);
 }
 
-static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
-{
-	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned int work_exists = 0;
-
-	/* check for phy events */
-	if (!(tp->tg3_flags &
-	      (TG3_FLAG_USE_LINKCHG_REG |
-	       TG3_FLAG_POLL_SERDES))) {
-		if (sblk->status & SD_STATUS_LINK_CHG)
-			work_exists = 1;
-	}
-	/* check for RX/TX work to do */
-	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
-	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
-		work_exists = 1;
-
-	return work_exists;
-}
-
 /* MSI ISR - No need to check for interrupt sharing and no need to
  * flush status block and interrupt mailbox. PCI ordering rules
  * guarantee that MSI will arrive after the status block.
@@ -2934,7 +2937,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 	sblk->status &= ~SD_STATUS_UPDATED;
 
-	if (likely(tg3_has_work(dev, tp)))
+	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);	/* schedule NAPI poll */
 	else {
 		/* no work, re-enable interrupts
@@ -2981,7 +2984,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 	sblk->status &= ~SD_STATUS_UPDATED;
 
-	if (likely(tg3_has_work(dev, tp)))
+	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);	/* schedule NAPI poll */
 	else {
 		/* no work, shared interrupt perhaps? re-enable
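
Not part of the patch above: a minimal, self-contained C sketch of the work-detection idea that tg3_has_work() now provides to tg3_restart_ints() and the interrupt handlers. The types, flag values, and names used here (struct nic, struct hw_status, FLAG_*, STATUS_LINK_CHG, nic_has_work) are simplified stand-ins invented for illustration, not the real tg3 definitions; only the shape of the check mirrors the driver code.

#include <stdio.h>

/* Simplified stand-ins for the driver's flag and status bits (illustrative values). */
#define FLAG_USE_LINKCHG_REG	0x1
#define FLAG_POLL_SERDES	0x2
#define STATUS_LINK_CHG		0x4

/* Loose model of the shared status block the NIC DMAs into host memory. */
struct hw_status {
	unsigned int status;		/* latched event bits             */
	unsigned int tx_consumer;	/* TX ring index hardware reached */
	unsigned int rx_producer;	/* RX ring index hardware reached */
};

/* Loose model of the per-device private state. */
struct nic {
	unsigned int flags;
	struct hw_status *hw_status;
	unsigned int tx_cons;		/* TX index the driver has completed */
	unsigned int rx_rcb_ptr;	/* RX index the driver has completed */
};

/*
 * Same shape as the patch's tg3_has_work(): report 1 if either a link
 * change was latched in the status block (and link changes are status-block
 * driven rather than polled), or the hardware ring indices have moved past
 * what the driver has already processed.
 */
static unsigned int nic_has_work(const struct nic *np)
{
	const struct hw_status *sblk = np->hw_status;
	unsigned int work_exists = 0;

	/* phy event: only meaningful when not using link-change reg / serdes polling */
	if (!(np->flags & (FLAG_USE_LINKCHG_REG | FLAG_POLL_SERDES)) &&
	    (sblk->status & STATUS_LINK_CHG))
		work_exists = 1;

	/* RX/TX work: hardware indices ran ahead of the driver's pointers */
	if (sblk->tx_consumer != np->tx_cons ||
	    sblk->rx_producer != np->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

int main(void)
{
	struct hw_status sblk = { .status = 0, .tx_consumer = 5, .rx_producer = 7 };
	struct nic np = { .flags = 0, .hw_status = &sblk,
			  .tx_cons = 5, .rx_rcb_ptr = 3 };

	/* RX producer (7) ran ahead of the driver's pointer (3), so work is reported. */
	printf("work pending: %u\n", nic_has_work(&np));
	return 0;
}

Compiled standalone, the example prints "work pending: 1" because the modeled RX producer index has run ahead of the driver's completion pointer; that is the condition under which the patched tg3_restart_ints() forces a new interrupt via HOSTCC_MODE_NOW instead of returning without one.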