@@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 	}
 
 	bi->dma = dma;
-	bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+	bi->page_offset = 0;
 
 	return true;
 }
@@ -4129,27 +4129,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
 }
 
-/**
- * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
- * @rx_ring: ring to setup
- *
- * On many IA platforms the L1 cache has a critical stride of 4K, this
- * results in each receive buffer starting in the same cache set. To help
- * reduce the pressure on this cache set we can interleave the offsets so
- * that only every other buffer will be in the same cache set.
- **/
-static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
-{
-	struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
-	u16 i;
-
-	for (i = 0; i < rx_ring->count; i += 2) {
-		rx_buffer[0].page_offset = 0;
-		rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
-		rx_buffer = &rx_buffer[2];
-	}
-}
-
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
@@ -4195,8 +4174,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 	memset(rx_ring->rx_buffer_info, 0, size);
 
-	ixgbe_init_rx_page_offset(rx_ring);
-
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
 
@@ -4646,8 +4623,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
-	ixgbe_init_rx_page_offset(rx_ring);
-
 	return 0;
 err:
 	vfree(rx_ring->rx_buffer_info);
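
For reference, the scheme being removed here is a simple two-slot interleave: each Rx page holds two buffers, ixgbe_init_rx_page_offset() seeded alternating offsets of 0 and ixgbe_rx_bufsz(), and the old "page_offset ^= ixgbe_rx_bufsz(rx_ring)" line flipped a buffer to the other half of its page on every refill, so consecutive ring entries did not all start in the same 4K-strided L1 cache set. The sketch below is a minimal, standalone illustration of that XOR toggle, not driver code; BUF_SZ, RING_SIZE, struct rx_buf, ring_init() and ring_refill() are all hypothetical names.

	/*
	 * Illustrative sketch of the half-page interleave removed by this
	 * patch.  All names here are hypothetical; this is not ixgbe code.
	 */
	#include <stdio.h>

	#define BUF_SZ    2048u   /* assumed buffer size: half of a 4 KiB page */
	#define RING_SIZE 8u

	struct rx_buf {
		unsigned int page_offset;
	};

	/* Roughly what ixgbe_init_rx_page_offset() did: seed alternating
	 * offsets so only every other buffer starts in the same cache set.
	 */
	static void ring_init(struct rx_buf *ring)
	{
		unsigned int i;

		for (i = 0; i < RING_SIZE; i += 2) {
			ring[i].page_offset = 0;
			ring[i + 1].page_offset = BUF_SZ;
		}
	}

	/* Roughly what the old "bi->page_offset ^= bufsz" did: on each refill,
	 * flip the buffer to the other half of its page, which preserves the
	 * 0 / BUF_SZ interleave across refills.
	 */
	static void ring_refill(struct rx_buf *ring)
	{
		unsigned int i;

		for (i = 0; i < RING_SIZE; i++)
			ring[i].page_offset ^= BUF_SZ;
	}

	int main(void)
	{
		struct rx_buf ring[RING_SIZE];
		unsigned int i;

		ring_init(ring);
		ring_refill(ring);  /* offsets become BUF_SZ, 0, BUF_SZ, 0, ... */

		for (i = 0; i < RING_SIZE; i++)
			printf("buffer %u: offset %u\n", i, ring[i].page_offset);
		return 0;
	}

After this patch the offset is simply zeroed when a page is first mapped; presumably any alternation between the two halves of a page now happens later in the receive path when a buffer is reused, but that is not visible in these hunks.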