@@ -562,7 +562,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int capacity;
 
-again:
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(vi);
 
@@ -571,14 +570,20 @@ again:
 
 	/* This can happen with OOM and indirect buffers. */
 	if (unlikely(capacity < 0)) {
-		netif_stop_queue(dev);
-		dev_warn(&dev->dev, "Unexpected full queue\n");
-		if (unlikely(!virtqueue_enable_cb(vi->svq))) {
-			virtqueue_disable_cb(vi->svq);
-			netif_start_queue(dev);
-			goto again;
+		if (net_ratelimit()) {
+			if (likely(capacity == -ENOMEM)) {
+				dev_warn(&dev->dev,
+					 "TX queue failure: out of memory\n");
+			} else {
+				dev->stats.tx_fifo_errors++;
+				dev_warn(&dev->dev,
+					 "Unexpected TX queue failure: %d\n",
+					 capacity);
+			}
 		}
-		return NETDEV_TX_BUSY;
+		dev->stats.tx_dropped++;
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
 	}
 	virtqueue_kick(vi->svq);
 
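For readability, here is a sketch of the error path in start_xmit() with the change applied. It is reconstructed from the hunks above; the surrounding context (vi, skb, and capacity holding the return value of the transmit attempt made just before this block) is assumed from the rest of drivers/net/virtio_net.c, and the comments are editorial, not part of the patch itself.

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		/* Warn, but rate-limit so a burst of failures cannot
		 * flood the kernel log. */
		if (net_ratelimit()) {
			if (likely(capacity == -ENOMEM)) {
				dev_warn(&dev->dev,
					 "TX queue failure: out of memory\n");
			} else {
				/* Anything other than -ENOMEM is unexpected;
				 * account it as a FIFO error as well. */
				dev->stats.tx_fifo_errors++;
				dev_warn(&dev->dev,
					 "Unexpected TX queue failure: %d\n",
					 capacity);
			}
		}
		/* Drop the packet: count it, free the skb, and report
		 * success to the stack instead of NETDEV_TX_BUSY. */
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(vi->svq);

Returning NETDEV_TX_OK after freeing the skb tells the networking core the packet has been consumed, whereas NETDEV_TX_BUSY would make it requeue and retry the same skb; the removed stop-queue/re-enable-callback/goto dance existed to cope with exactly that retry path.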