@@ -63,9 +63,7 @@ static void ri_tasklet(unsigned long dev)
 	txq = netdev_get_tx_queue(_dev, 0);
 	if ((skb = skb_peek(&dp->tq)) == NULL) {
 		if (__netif_tx_trylock(txq)) {
-			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
-				skb_queue_tail(&dp->tq, skb);
-			}
+			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
 			__netif_tx_unlock(txq);
 		} else {
 			/* reschedule */
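
The hunk above replaces a per-packet drain loop with a single splice: skb_queue_splice_tail_init() relinks the whole rq list onto the tail of tq in O(1) and leaves rq reinitialized and empty, where the old loop paid an irq-safe lock round trip in skb_dequeue() and another in skb_queue_tail() for every packet. A minimal before/after sketch, using the driver's own names:

/* Before: one skb at a time; skb_dequeue() and skb_queue_tail() each
 * take and release the queue spinlock with IRQs disabled, per packet.
 */
while ((skb = skb_dequeue(&dp->rq)) != NULL)
	skb_queue_tail(&dp->tq, skb);

/* After: one O(1) relink of the whole list; &dp->rq comes back
 * reinitialized (empty), ready for the next burst.
 */
skb_queue_splice_tail_init(&dp->rq, &dp->tq);
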
@@ -163,7 +161,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 	}
 
-	skb_queue_tail(&dp->rq, skb);
+	__skb_queue_tail(&dp->rq, skb);
 	if (!dp->tasklet_pending) {
 		dp->tasklet_pending = 1;
 		tasklet_schedule(&dp->ifb_tasklet);
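
Switching to __skb_queue_tail() drops the queue's internal spin_lock_irqsave(). The invariant that appears to make this safe: assuming ifb does not advertise NETIF_F_LLTX, dev_queue_xmit() calls ifb_xmit() with the device's tx queue lock already held, and ri_tasklet() above only touches rq after winning that same lock via __netif_tx_trylock(), so rq is always serialized externally and its own spinlock is redundant. The two sides of the pattern, as a sketch:

/* Producer: ifb_xmit() runs under txq->_xmit_lock, taken for us by
 * the core xmit path (assumption: no NETIF_F_LLTX), so the unlocked
 * enqueue cannot race with the consumer.
 */
__skb_queue_tail(&dp->rq, skb);

/* Consumer: the tasklet drains rq only after winning the same lock,
 * and reschedules itself if it loses the trylock.
 */
if (__netif_tx_trylock(txq)) {
	skb_queue_splice_tail_init(&dp->rq, &dp->tq);
	__netif_tx_unlock(txq);
}
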
@@ -178,8 +176,8 @@ static int ifb_close(struct net_device *dev)
 
 	tasklet_kill(&dp->ifb_tasklet);
 	netif_stop_queue(dev);
-	skb_queue_purge(&dp->rq);
-	skb_queue_purge(&dp->tq);
+	__skb_queue_purge(&dp->rq);
+	__skb_queue_purge(&dp->tq);
 	return 0;
 }
 
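__skb_queue_purge() frees every queued skb without taking the list spinlock. That is safe in ifb_close() because tasklet_kill() has already waited for the tasklet to finish and the queue is stopped, so nothing else can touch rq or tq. It behaves roughly like this sketch (purge_sketch is an illustrative name, not a kernel symbol):

/* Unlocked purge: lockless dequeue plus kfree_skb() on each entry.
 * The caller must guarantee exclusive access, as ifb_close() does
 * after tasklet_kill() and netif_stop_queue().
 */
static inline void purge_sketch(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
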
@@ -188,8 +186,8 @@ static int ifb_open(struct net_device *dev)
 	struct ifb_private *dp = netdev_priv(dev);
 
 	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
-	skb_queue_head_init(&dp->rq);
-	skb_queue_head_init(&dp->tq);
+	__skb_queue_head_init(&dp->rq);
+	__skb_queue_head_init(&dp->tq);
 	netif_start_queue(dev);
 
 	return 0;
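
The init side matches: skb_queue_head_init() also does spin_lock_init() on the queue lock, but after this patch every access goes through the __-prefixed helpers under the tx lock, so that spinlock is never taken and __skb_queue_head_init() sets up only the list itself. A sketch of what the lockless init amounts to (queue_init_sketch is an illustrative name, assuming the usual sk_buff_head layout):

/* List setup only, no spin_lock_init(); leaving list->lock
 * uninitialized is deliberate, since it must never be taken now.
 */
static inline void queue_init_sketch(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}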