@@ -882,7 +882,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -1474,7 +1474,7 @@ next_desc:
 	}
 
 	rx_ring->next_to_clean = i;
-	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+	cleaned_count = ixgbe_desc_unused(rx_ring);
 
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -3126,7 +3126,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
 	ixgbe_rx_desc_queue_enable(adapter, ring);
-	ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
+	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -6817,7 +6817,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 
 	/* We need to check again in a case another CPU has just
 	 * made room available. */
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+	if (likely(ixgbe_desc_unused(tx_ring) < size))
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -6828,7 +6828,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 
 static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 {
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+	if (likely(ixgbe_desc_unused(tx_ring) >= size))
 		return 0;
 	return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
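
--

Note on the helper: only the call sites appear in the hunks above. Both the
retired IXGBE_DESC_UNUSED() macro and its ixgbe_desc_unused() replacement
answer the same question, namely how many descriptors are still free between
next_to_clean and next_to_use. The function body is not part of this excerpt;
a minimal sketch of what the inline helper in ixgbe.h presumably looks like
(the u16 return type and local variable names here are illustrative, not
taken from the patch):

static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	/* Free slots run from next_to_use forward to next_to_clean.
	 * When clean is at or behind use the span wraps, so add
	 * ring->count.  One slot is always left unused so that
	 * ntu == ntc can only mean "ring empty", never "ring full".
	 */
	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

Compared with the macro, the inline function evaluates its argument once and
gives the compiler a real struct ixgbe_ring * to type-check, which is the
usual payoff of this kind of macro-to-function conversion.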