@@ -299,7 +299,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 *	Slow phase with lock held.
 	 */
 
-	disable_irq_nosync(dev->irq);
+	disable_irq_nosync_lockdep(dev->irq);
 
 	spin_lock(&ei_local->page_lock);
 
@@ -338,7 +338,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 		outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 		spin_unlock(&ei_local->page_lock);
-		enable_irq(dev->irq);
+		enable_irq_lockdep(dev->irq);
 		ei_local->stat.tx_errors++;
 		return 1;
 	}
@@ -379,7 +379,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 
 	spin_unlock(&ei_local->page_lock);
-	enable_irq(dev->irq);
+	enable_irq_lockdep(dev->irq);
 
 	dev_kfree_skb (skb);
 	ei_local->stat.tx_bytes += send_length;
@@ -505,9 +505,9 @@ irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 void ei_poll(struct net_device *dev)
 {
-	disable_irq(dev->irq);
+	disable_irq_lockdep(dev->irq);
 	ei_interrupt(dev->irq, dev, NULL);
-	enable_irq(dev->irq);
+	enable_irq_lockdep(dev->irq);
 }
 #endif
 
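For reference, the _lockdep variants used by this patch are small inline wrappers in include/linux/interrupt.h. The sketch below is a rough reconstruction of that interface for illustration only, not part of the patch: lockdep cannot model the masking of a single IRQ line, so under CONFIG_LOCKDEP the wrappers additionally disable local interrupts, which keeps the validator from seeing page_lock taken both from hardirq context and with interrupts enabled.

/*
 * Rough sketch of the lockdep-aware IRQ helpers, assuming the
 * <linux/interrupt.h> definitions of that era.  With CONFIG_LOCKDEP
 * enabled they also turn local interrupts off/on around the critical
 * region, so lockdep treats it as IRQ-safe while the device's own
 * IRQ line is masked.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}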