@@ -517,11 +517,23 @@ static inline void napi_synchronize(const struct napi_struct *n)
 #endif

 enum netdev_queue_state_t {
-        __QUEUE_STATE_XOFF,
+        __QUEUE_STATE_DRV_XOFF,
+        __QUEUE_STATE_STACK_XOFF,
         __QUEUE_STATE_FROZEN,
-#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
-                                    (1 << __QUEUE_STATE_FROZEN))
+#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
+                              (1 << __QUEUE_STATE_STACK_XOFF))
+#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
+                                        (1 << __QUEUE_STATE_FROZEN))
 };
+/*
+ * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
+ * netif_tx_* functions below are used to manipulate this flag. The
+ * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
+ * queue independently. The netif_xmit_*stopped functions below are called
+ * to check if the queue has been stopped by the driver or stack (either
+ * of the XOFF bits are set in the state). Drivers should not need to call
+ * netif_xmit*stopped functions, they should only be using netif_tx_*.
+ */

 struct netdev_queue {
 /*
@@ -1718,7 +1730,7 @@ extern void __netif_schedule(struct Qdisc *q);

 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-        if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+        if (!(txq->state & QUEUE_STATE_ANY_XOFF))
                 __netif_schedule(txq->qdisc);
 }

@@ -1732,7 +1744,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev)

 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 {
-        clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+        clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }

 /**
@@ -1764,7 +1776,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
                 return;
         }
 #endif
-        if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
                 __netif_schedule(dev_queue->qdisc);
 }

@@ -1796,7 +1808,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
                 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
                 return;
         }
-        set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+        set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }

 /**
@@ -1823,7 +1835,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)

 static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
-        return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+        return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }

 /**
@@ -1837,9 +1849,16 @@ static inline int netif_queue_stopped(const struct net_device *dev)
         return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }

-static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
 {
-        return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
+        return dev_queue->state & QUEUE_STATE_ANY_XOFF;
+}
+
+static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+{
+        return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
+}
+
 }

 /**
@@ -1926,7 +1945,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
         if (netpoll_trap())
                 return;
 #endif
-        if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
                 __netif_schedule(txq->qdisc);
 }
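
For illustration only (not part of the patch): a minimal sketch of the driver-side usage the new comment describes, built around a hypothetical driver whose names (my_ring, my_start_xmit, my_tx_complete, the space counter) are invented here. The point is that the driver touches only the netif_tx_* helpers, which operate on __QUEUE_STATE_DRV_XOFF; the stack's __QUEUE_STATE_STACK_XOFF bit and the combined netif_xmit_*stopped() checks are left to the core.

/* Illustrative sketch, hypothetical driver -- not from this patch. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_ring {
        struct netdev_queue *txq;       /* cached netdev_get_tx_queue(dev, 0) */
        unsigned int space;             /* free descriptors (hypothetical) */
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_ring *ring = netdev_priv(dev);

        /* Driver-side flow control: this sets only __QUEUE_STATE_DRV_XOFF. */
        if (ring->space < MAX_SKB_FRAGS + 1) {
                netif_tx_stop_queue(ring->txq);
                return NETDEV_TX_BUSY;
        }

        /* ... post skb to hardware and decrement ring->space ... */
        return NETDEV_TX_OK;
}

static void my_tx_complete(struct my_ring *ring)
{
        /* ... reclaim descriptors and increment ring->space ... */

        /* Clears only the driver bit; a stack-set STACK_XOFF stays put. */
        if (netif_tx_queue_stopped(ring->txq) &&
            ring->space > MAX_SKB_FRAGS + 1)
                netif_tx_wake_queue(ring->txq);
}

On the stack side, transmit paths are expected to gate on netif_xmit_frozen_or_stopped(txq) (or netif_xmit_stopped(txq)), so a queue stopped by either the driver or the stack, or frozen, is left alone regardless of which XOFF bit was set.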