@@ -108,7 +108,7 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 
 static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
 {
-	if (buf->is_page)
+	if (buf->flags & EFX_RX_BUF_PAGE)
 		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
 	else
 		return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
@@ -158,7 +158,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 		/* Adjust the SKB for padding and checksum */
 		skb_reserve(skb, NET_IP_ALIGN);
 		rx_buf->len = skb_len - NET_IP_ALIGN;
-		rx_buf->is_page = false;
+		rx_buf->flags = 0;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
@@ -227,7 +227,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
 		rx_buf->u.page = page;
 		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
-		rx_buf->is_page = true;
+		rx_buf->flags = EFX_RX_BUF_PAGE;
 		++rx_queue->added_count;
 		++rx_queue->alloc_page_count;
 		++state->refcnt;
@@ -248,7 +248,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				struct efx_rx_buffer *rx_buf)
 {
-	if (rx_buf->is_page && rx_buf->u.page) {
+	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
 
 		state = page_address(rx_buf->u.page);
@@ -258,7 +258,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				       efx_rx_buf_size(efx),
 				       PCI_DMA_FROMDEVICE);
 		}
-	} else if (!rx_buf->is_page && rx_buf->u.skb) {
+	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
 				 rx_buf->len, PCI_DMA_FROMDEVICE);
 	}
@@ -267,10 +267,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 static void efx_free_rx_buffer(struct efx_nic *efx,
 			       struct efx_rx_buffer *rx_buf)
 {
-	if (rx_buf->is_page && rx_buf->u.page) {
+	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
 		rx_buf->u.page = NULL;
-	} else if (!rx_buf->is_page && rx_buf->u.skb) {
+	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		dev_kfree_skb_any(rx_buf->u.skb);
 		rx_buf->u.skb = NULL;
 	}
@@ -310,7 +310,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
 	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
 	new_buf->u.page = rx_buf->u.page;
 	new_buf->len = rx_buf->len;
-	new_buf->is_page = true;
+	new_buf->flags = EFX_RX_BUF_PAGE;
 	++rx_queue->added_count;
 }
 
@@ -324,7 +324,10 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
 	struct efx_rx_buffer *new_buf;
 	unsigned index;
 
-	if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+	rx_buf->flags &= EFX_RX_BUF_PAGE;
+
+	if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
+	    efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
 	    page_count(rx_buf->u.page) == 1)
 		efx_resurrect_rx_buffer(rx_queue, rx_buf);
 
@@ -411,8 +414,7 @@ void efx_rx_slow_fill(unsigned long context)
 
 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 				     struct efx_rx_buffer *rx_buf,
-				     int len, bool *discard,
-				     bool *leak_packet)
+				     int len, bool *leak_packet)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -423,7 +425,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 	/* The packet must be discarded, but this is only a fatal error
 	 * if the caller indicated it was
 	 */
-	*discard = true;
+	rx_buf->flags |= EFX_RX_PKT_DISCARD;
 
 	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
 		if (net_ratelimit())
@@ -436,7 +438,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 		 * data at the end of the skb will be trashed. So
 		 * we have no choice but to leak the fragment.
 		 */
-		*leak_packet = !rx_buf->is_page;
+		*leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
 		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
 	} else {
 		if (net_ratelimit())
@@ -456,13 +458,13 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
  */
 static void efx_rx_packet_gro(struct efx_channel *channel,
 			      struct efx_rx_buffer *rx_buf,
-			      const u8 *eh, bool checksummed)
+			      const u8 *eh)
 {
 	struct napi_struct *napi = &channel->napi_str;
 	gro_result_t gro_result;
 
 	/* Pass the skb/page into the GRO engine */
-	if (rx_buf->is_page) {
+	if (rx_buf->flags & EFX_RX_BUF_PAGE) {
 		struct efx_nic *efx = channel->efx;
 		struct page *page = rx_buf->u.page;
 		struct sk_buff *skb;
@@ -484,8 +486,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 		skb->len = rx_buf->len;
 		skb->data_len = rx_buf->len;
 		skb->truesize += rx_buf->len;
-		skb->ip_summed =
-			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
 
 		skb_record_rx_queue(skb, channel->channel);
 
@@ -493,7 +495,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 	} else {
 		struct sk_buff *skb = rx_buf->u.skb;
 
-		EFX_BUG_ON_PARANOID(!checksummed);
+		EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
 		rx_buf->u.skb = NULL;
 
 		gro_result = napi_gro_receive(napi, skb);
@@ -508,7 +510,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 }
 
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
-		   unsigned int len, bool checksummed, bool discard)
+		   unsigned int len, u16 flags)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
@@ -516,6 +518,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	bool leak_packet = false;
 
 	rx_buf = efx_rx_buffer(rx_queue, index);
+	rx_buf->flags |= flags;
 
 	/* This allows the refill path to post another buffer.
 	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -524,18 +527,17 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	rx_queue->removed_count++;
 
 	/* Validate the length encoded in the event vs the descriptor pushed */
-	efx_rx_packet__check_len(rx_queue, rx_buf, len,
-				 &discard, &leak_packet);
+	efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
 
 	netif_vdbg(efx, rx_status, efx->net_dev,
 		   "RX queue %d received id %x at %llx+%x %s%s\n",
 		   efx_rx_queue_index(rx_queue), index,
 		   (unsigned long long)rx_buf->dma_addr, len,
-		   (checksummed ? " [SUMMED]" : ""),
-		   (discard ? " [DISCARD]" : ""));
+		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
+		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
 
 	/* Discard packet, if instructed to do so */
-	if (unlikely(discard)) {
+	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
 		if (unlikely(leak_packet))
 			channel->n_skbuff_leaks++;
 		else
@@ -562,10 +564,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	rx_buf->len = len - efx->type->rx_buffer_hash_size;
 out:
 	if (channel->rx_pkt)
-		__efx_rx_packet(channel,
-				channel->rx_pkt, channel->rx_pkt_csummed);
+		__efx_rx_packet(channel, channel->rx_pkt);
 	channel->rx_pkt = rx_buf;
-	channel->rx_pkt_csummed = checksummed;
 }
 
 static void efx_rx_deliver(struct efx_channel *channel,
@@ -588,8 +588,7 @@ static void efx_rx_deliver(struct efx_channel *channel,
 }
 
 /* Handle a received packet. Second half: Touches packet payload. */
-void __efx_rx_packet(struct efx_channel *channel,
-		     struct efx_rx_buffer *rx_buf, bool checksummed)
+void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
 	struct efx_nic *efx = channel->efx;
 	u8 *eh = efx_rx_buf_eh(efx, rx_buf);
@@ -603,7 +602,7 @@ void __efx_rx_packet(struct efx_channel *channel,
 		return;
 	}
 
-	if (!rx_buf->is_page) {
+	if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
 		struct sk_buff *skb = rx_buf->u.skb;
 
 		prefetch(skb_shinfo(skb));
@@ -622,10 +621,10 @@ void __efx_rx_packet(struct efx_channel *channel,
 	}
 
 	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
-		checksummed = false;
+		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-	if (likely(checksummed || rx_buf->is_page))
-		efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
+	if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
+		efx_rx_packet_gro(channel, rx_buf, eh);
 	else
 		efx_rx_deliver(channel, rx_buf);
 }
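
Note: the hunks above reference a flags field and three flag bits that are declared elsewhere in the patch (in struct efx_rx_buffer) and are not part of this excerpt. The following is a minimal sketch of the assumed declarations, inferred only from how this file uses them; the field layout and bit values are illustrative assumptions, not copied from the patch.

/* Sketch only: names match what the hunks above use; layout and the
 * numeric bit values are assumptions for illustration.
 */
struct efx_rx_buffer {
	dma_addr_t dma_addr;		/* DMA address of the receive buffer */
	union {
		struct sk_buff *skb;	/* used when EFX_RX_BUF_PAGE is clear */
		struct page *page;	/* used when EFX_RX_BUF_PAGE is set */
	} u;
	unsigned int len;		/* usable length of the buffer */
	u16 flags;			/* replaces is_page/checksummed/discard */
};

#define EFX_RX_BUF_PAGE		0x0001	/* buffer is a page, not an skb */
#define EFX_RX_PKT_CSUMMED	0x0002	/* hardware validated the checksum */
#define EFX_RX_PKT_DISCARD	0x0004	/* event marked the packet for discard */

With this layout the per-packet state (CSUMMED/DISCARD) shares a word with the buffer-type bit, which is why efx_recycle_rx_buffer() masks with rx_buf->flags &= EFX_RX_BUF_PAGE before reuse: it keeps the buffer-type bit and clears the stale per-packet bits.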