@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 	wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
-static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
-				  struct igb_ring *rx_ring)
-{
-#define IGB_MAX_BUILD_SKB_SIZE \
-	(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
-	 (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
-
-	/* set build_skb flag */
-	if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
-		set_ring_build_skb_enabled(rx_ring);
-	else
-		clear_ring_build_skb_enabled(rx_ring);
-}
-
 /**
  * igb_configure_rx - Configure receive Unit after Reset
  * @adapter: board private structure
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *rx_ring = adapter->rx_ring[i];
-		igb_set_rx_buffer_len(adapter, rx_ring);
-		igb_configure_rx_ring(adapter, rx_ring);
-	}
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
 }
 
 /**
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 	return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
 
-static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
-					   union e1000_adv_rx_desc *rx_desc)
-{
-	struct igb_rx_buffer *rx_buffer;
-	struct sk_buff *skb;
-	struct page *page;
-	void *page_addr;
-	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
-	unsigned int truesize = IGB_RX_BUFSZ;
-#else
-	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-				SKB_DATA_ALIGN(NET_SKB_PAD +
-					       NET_IP_ALIGN +
-					       size);
-#endif
-
-	/* If we spanned a buffer we have a huge mess so test for it */
-	BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
-
-	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-	page = rx_buffer->page;
-	prefetchw(page);
-
-	page_addr = page_address(page) + rx_buffer->page_offset;
-
-	/* prefetch first cache line of first page */
-	prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
-#if L1_CACHE_BYTES < 128
-	prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
-#endif
-
-	/* build an skb to around the page buffer */
-	skb = build_skb(page_addr, truesize);
-	if (unlikely(!skb)) {
-		rx_ring->rx_stats.alloc_failed++;
-		return NULL;
-	}
-
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      IGB_RX_BUFSZ,
-				      DMA_FROM_DEVICE);
-
-	/* update pointers within the skb to store the data */
-	skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
-	__skb_put(skb, size);
-
-	/* pull timestamp out of packet data */
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-		__skb_pull(skb, IGB_TS_HDR_LEN);
-	}
-
-	if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
-		/* hand second half of page back to the ring */
-		igb_reuse_rx_page(rx_ring, rx_buffer);
-	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-	}
-
-	/* clear contents of buffer_info */
-	rx_buffer->dma = 0;
-	rx_buffer->page = NULL;
-
-	return skb;
-}
-
 static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 					   union e1000_adv_rx_desc *rx_desc,
 					   struct sk_buff *skb)
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 		rmb();
 
 		/* retrieve a buffer from the ring */
-		if (ring_uses_build_skb(rx_ring))
-			skb = igb_build_rx_buffer(rx_ring, rx_desc);
-		else
-			skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+		skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
 		/* exit if we failed to retrieve a buffer */
 		if (!skb)
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	return true;
 }
 
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
-	if (ring_uses_build_skb(rx_ring))
-		return NET_SKB_PAD + NET_IP_ALIGN;
-	else
-		return 0;
-}
-
 /**
  * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 		 * Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
-						     bi->page_offset +
-						     igb_rx_offset(rx_ring));
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 
 		rx_desc++;
 		bi++;