@@ -251,7 +251,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -529,7 +529,7 @@ next_desc:
 	}
 
 	rx_ring->next_to_clean = i;
-	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+	cleaned_count = ixgbevf_desc_unused(rx_ring);
 
 	if (cleaned_count)
 		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
@@ -1380,7 +1380,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
 		ixgbevf_alloc_rx_buffers(adapter, ring,
-					 IXGBE_DESC_UNUSED(ring));
+					 ixgbevf_desc_unused(ring));
 	}
 }
 
@@ -3102,7 +3102,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 
 	/* We need to check again in a case another CPU has just
 	 * made room available. */
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+	if (likely(ixgbevf_desc_unused(tx_ring) < size))
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -3113,7 +3113,7 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 
 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
 		return 0;
 	return __ixgbevf_maybe_stop_tx(tx_ring, size);
 }
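
Note: the helper these hunks switch to, ixgbevf_desc_unused(), is not defined in the hunks above (it would live in ixgbevf.h). A minimal sketch of what the static inline presumably looks like, assuming it keeps the same free-descriptor arithmetic as the IXGBE_DESC_UNUSED() macro it replaces:

/* Sketch only: assumed static-inline equivalent of the old
 * IXGBE_DESC_UNUSED() macro; the actual definition is not part of
 * this excerpt.
 */
static inline int ixgbevf_desc_unused(struct ixgbevf_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	/* Descriptors free between next_to_use and next_to_clean,
	 * accounting for ring wrap and keeping one slot unused.
	 */
	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

Relative to the macro, an inline of this shape gets type checking on the ring argument and evaluates it only once, without changing the computed value at any of the call sites touched above.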