@@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)
 					 rp->tx_skbuff[entry]->len,
 					 PCI_DMA_TODEVICE);
 		}
-		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+		dev_kfree_skb(rp->tx_skbuff[entry]);
 		rp->tx_skbuff[entry] = NULL;
 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
 	}
@@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)
 	if (intr_status & IntrPCIErr)
 		netif_warn(rp, hw, dev, "PCI error\n");
 
-	napi_disable(&rp->napi);
-	rhine_irq_disable(rp);
-	/* Slow and safe. Consider __napi_schedule as a replacement ? */
-	napi_enable(&rp->napi);
-	napi_schedule(&rp->napi);
+	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
 
 out_unlock:
 	mutex_unlock(&rp->task_lock);