@@ -209,13 +209,11 @@ static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
 		fp->last_max_sge = idx;
 }
 
-static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
-				  struct eth_fast_path_rx_cqe *fp_cqe)
+static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+					 u16 sge_len,
+					 struct eth_end_agg_rx_cqe *cqe)
 {
 	struct bnx2x *bp = fp->bp;
-	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
-				     le16_to_cpu(fp_cqe->len_on_bd)) >>
-		      SGE_PAGE_SHIFT;
 	u16 last_max, last_elem, first_elem;
 	u16 delta = 0;
 	u16 i;
@@ -226,15 +224,15 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 	/* First mark all used pages */
 	for (i = 0; i < sge_len; i++)
 		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
-			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
+			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
 
 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
-	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 
 	/* Here we assume that the last SGE index is the biggest */
 	prefetch((void *)(fp->sge_mask));
 	bnx2x_update_last_max_sge(fp,
-		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 
 	last_max = RX_SGE(fp->last_max_sge);
 	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
@@ -328,6 +326,12 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 	tpa_info->placement_offset = cqe->placement_offset;
 	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
+	if (fp->mode == TPA_MODE_GRO) {
+		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
+		tpa_info->full_page =
+			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+		tpa_info->gro_size = gro_size;
+	}
 
 #ifdef BNX2X_STOP_ON_ERROR
 	fp->tpa_queue_used |= (1 << queue);
@@ -384,25 +388,40 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
 }
 
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			       u16 queue, struct sk_buff *skb,
+			       struct bnx2x_agg_info *tpa_info,
+			       u16 pages,
+			       struct sk_buff *skb,
 			       struct eth_end_agg_rx_cqe *cqe,
 			       u16 cqe_idx)
 {
 	struct sw_rx_page *rx_pg, old_rx_pg;
-	u32 i, frag_len, frag_size, pages;
-	int err;
-	int j;
-	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	u32 i, frag_len, frag_size;
+	int err, j, frag_id = 0;
 	u16 len_on_bd = tpa_info->len_on_bd;
+	u16 full_page = 0, gro_size = 0;
 
 	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
-	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+	if (fp->mode == TPA_MODE_GRO) {
+		gro_size = tpa_info->gro_size;
+		full_page = tpa_info->full_page;
+	}
 
 	/* This is needed in order to enable forwarding support */
-	if (frag_size)
+	if (frag_size) {
 		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
 					tpa_info->parsing_flags, len_on_bd);
 
+		/* set for GRO */
+		if (fp->mode == TPA_MODE_GRO)
+			skb_shinfo(skb)->gso_type =
+			    (GET_FLAG(tpa_info->parsing_flags,
+				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+			     PRS_FLAG_OVERETH_IPV6) ?
+				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
+	}
+
+
 #ifdef BNX2X_STOP_ON_ERROR
 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
@@ -419,7 +438,12 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		/* FW gives the indices of the SGE as if the ring is an array
 		   (meaning that "next" element will consume 2 indices) */
-		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
+		if (fp->mode == TPA_MODE_GRO)
+			frag_len = min_t(u32, frag_size, (u32)full_page);
+		else /* LRO */
+			frag_len = min_t(u32, frag_size,
+					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
+
 		rx_pg = &fp->rx_page_ring[sge_idx];
 		old_rx_pg = *rx_pg;
 
@@ -435,9 +459,21 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		dma_unmap_page(&bp->pdev->dev,
 			       dma_unmap_addr(&old_rx_pg, mapping),
 			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-
 		/* Add one frag and update the appropriate fields in the skb */
-		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+		if (fp->mode == TPA_MODE_LRO)
+			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+		else { /* GRO */
+			int rem;
+			int offset = 0;
+			for (rem = frag_len; rem > 0; rem -= gro_size) {
+				int len = rem > gro_size ? gro_size : rem;
+				skb_fill_page_desc(skb, frag_id++,
+						   old_rx_pg.page, offset, len);
+				if (offset)
+					get_page(old_rx_pg.page);
+				offset += len;
+			}
+		}
 
 		skb->data_len += frag_len;
 		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
@@ -449,18 +485,17 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	return 0;
 }
 
-static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			   u16 queue, struct eth_end_agg_rx_cqe *cqe,
-			   u16 cqe_idx)
+static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+				  struct bnx2x_agg_info *tpa_info,
+				  u16 pages,
+				  struct eth_end_agg_rx_cqe *cqe,
+				  u16 cqe_idx)
 {
-	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
-	u32 pad = tpa_info->placement_offset;
+	u8 pad = tpa_info->placement_offset;
 	u16 len = tpa_info->len_on_bd;
 	struct sk_buff *skb = NULL;
-	u8 *data = rx_buf->data;
-	/* alloc new skb */
-	u8 *new_data;
+	u8 *new_data, *data = rx_buf->data;
 	u8 old_tpa_state = tpa_info->tpa_state;
 
 	tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -500,7 +535,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		skb->protocol = eth_type_trans(skb, bp->dev);
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-		if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
+					 skb, cqe, cqe_idx)) {
 			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
 			napi_gro_receive(&fp->napi, skb);
@@ -565,7 +601,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		struct eth_fast_path_rx_cqe *cqe_fp;
 		u8 cqe_fp_flags;
 		enum eth_rx_cqe_type cqe_fp_type;
-		u16 len, pad;
+		u16 len, pad, queue;
 		u8 *data;
 
 #ifdef BNX2X_STOP_ON_ERROR
@@ -586,17 +622,21 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
 		   cqe_fp_flags, cqe_fp->status_flags,
 		   le32_to_cpu(cqe_fp->rss_hash_result),
-		   le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
+		   le16_to_cpu(cqe_fp->vlan_tag),
+		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
 
 		/* is this a slowpath msg? */
 		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 			bnx2x_sp_event(fp, cqe);
 			goto next_cqe;
 		}
+
 		rx_buf = &fp->rx_buf_ring[bd_cons];
 		data = rx_buf->data;
 
 		if (!CQE_TYPE_FAST(cqe_fp_type)) {
+			struct bnx2x_agg_info *tpa_info;
+			u16 frag_size, pages;
 #ifdef BNX2X_STOP_ON_ERROR
 			/* sanity check */
 			if (fp->disable_tpa &&
@@ -616,28 +656,38 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				bnx2x_tpa_start(fp, queue,
 						bd_cons, bd_prod,
 						cqe_fp);
+
 				goto next_rx;
-			} else {
-				u16 queue =
-					cqe->end_agg_cqe.queue_index;
-				DP(NETIF_MSG_RX_STATUS,
-				   "calling tpa_stop on queue %d\n",
-				   queue);
 
-				bnx2x_tpa_stop(bp, fp, queue,
-					       &cqe->end_agg_cqe,
-					       comp_ring_cons);
+			}
+			queue = cqe->end_agg_cqe.queue_index;
+			tpa_info = &fp->tpa_info[queue];
+			DP(NETIF_MSG_RX_STATUS,
+			   "calling tpa_stop on queue %d\n",
+			   queue);
+
+			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
+				    tpa_info->len_on_bd;
+
+			if (fp->mode == TPA_MODE_GRO)
+				pages = (frag_size + tpa_info->full_page - 1) /
+					 tpa_info->full_page;
+			else
+				pages = SGE_PAGE_ALIGN(frag_size) >>
+					SGE_PAGE_SHIFT;
+
+			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
+				       &cqe->end_agg_cqe, comp_ring_cons);
 #ifdef BNX2X_STOP_ON_ERROR
-				if (bp->panic)
-					return 0;
+			if (bp->panic)
+				return 0;
 #endif
 
-				bnx2x_update_sge_prod(fp, cqe_fp);
-				goto next_cqe;
-			}
+			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
+			goto next_cqe;
 		}
 		/* non TPA */
-		len = le16_to_cpu(cqe_fp->pkt_len);
+		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
 		pad = cqe_fp->placement_offset;
 		dma_sync_single_for_cpu(&bp->pdev->dev,
 					dma_unmap_addr(rx_buf, mapping),
@@ -3440,13 +3490,15 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 netdev_features_t bnx2x_fix_features(struct net_device *dev,
-	netdev_features_t features)
+				     netdev_features_t features)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
 	/* TPA requires Rx CSUM offloading */
-	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
+	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
 		features &= ~NETIF_F_LRO;
+		features &= ~NETIF_F_GRO;
+	}
 
 	return features;
 }
@@ -3462,6 +3514,11 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 	else
 		flags &= ~TPA_ENABLE_FLAG;
 
+	if (features & NETIF_F_GRO)
+		flags |= GRO_ENABLE_FLAG;
+	else
+		flags &= ~GRO_ENABLE_FLAG;
+
 	if (features & NETIF_F_LOOPBACK) {
 		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
 			bp->link_params.loopback_mode = LOOPBACK_BMAC;