@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
 	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, DMA_FROM_DEVICE);
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
@@ -333,13 +333,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
 	struct sk_buff *skb = rx_buf->skb;
 	/* alloc new skb */
-	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, DMA_FROM_DEVICE);
+			 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -349,10 +349,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
 #ifdef BNX2X_STOP_ON_ERROR
-		if (pad + len > bp->rx_buf_size) {
+		if (pad + len > fp->rx_buf_size) {
 			BNX2X_ERR("skb_put is about to fail... "
 				  "pad %d len %d rx_buf_size %d\n",
-				  pad, len, bp->rx_buf_size);
+				  pad, len, fp->rx_buf_size);
 			bnx2x_panic();
 			return;
 		}
@@ -582,7 +582,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
 				dma_unmap_single(&bp->pdev->dev,
 					dma_unmap_addr(rx_buf, mapping),
-						 bp->rx_buf_size,
+						 fp->rx_buf_size,
 						 DMA_FROM_DEVICE);
 				skb_reserve(skb, pad);
 				skb_put(skb, len);
@@ -821,19 +821,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 	u16 ring_prod;
 	int i, j;
 
-	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-		IP_HEADER_ALIGNMENT_PADDING;
-
-	DP(NETIF_MSG_IFUP,
-	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
 	for_each_rx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
+		DP(NETIF_MSG_IFUP,
+		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
 		if (!fp->disable_tpa) {
 			for (i = 0; i < max_agg_queues; i++) {
 				fp->tpa_pool[i].skb =
-				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 				if (!fp->tpa_pool[i].skb) {
 					BNX2X_ERR("Failed to allocate TPA "
 						  "skb pool for queue[%d] - "
@@ -941,7 +938,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 			rx_buf->skb = NULL;
 			dev_kfree_skb(skb);
@@ -1249,6 +1246,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
 	return rc;
 }
 
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
+		if (IS_FCOE_IDX(i))
+			/*
+			 * Although there are no IP frames expected to arrive to
+			 * this ring we still want to add an
+			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+			 * overrun attack.
+			 */
+			fp->rx_buf_size =
+				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+				BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+		else
+			fp->rx_buf_size =
+				bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+				IP_HEADER_ALIGNMENT_PADDING;
+	}
+}
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -1272,6 +1294,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* must be called before memory allocation and HW init */
 	bnx2x_ilt_set_info(bp);
 
+	/* Set the receive queues buffer size */
+	bnx2x_set_rx_buf_size(bp);
+
 	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;
 
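
For readers who want to check the arithmetic, the per-queue sizing rule introduced in bnx2x_set_rx_buf_size() above can be exercised outside the driver. The following is a minimal standalone C sketch, not driver code: rx_buf_size_for() is a hypothetical helper, and the macro values are placeholder assumptions chosen only so the program compiles and runs (the real definitions live in bnx2x.h); only the formula mirrors the hunks above.

	/* rx_buf_size_sketch.c - build with: cc -o rx_buf_size_sketch rx_buf_size_sketch.c */
	#include <stdio.h>

	/* Placeholder values (assumptions), NOT the driver's real definitions. */
	#define ETH_OVREHEAD			(14 + 4 + 4)	/* assumed: MAC header + VLAN tag + CRC */
	#define BNX2X_RX_ALIGN			16		/* assumed alignment reserve */
	#define IP_HEADER_ALIGNMENT_PADDING	2		/* assumed NET_IP_ALIGN-style padding */
	#define BNX2X_FCOE_MINI_JUMBO_MTU	2500		/* assumed mini-jumbo MTU for the FCoE ring */

	/* Hypothetical helper mirroring the formula in bnx2x_set_rx_buf_size(). */
	static unsigned int rx_buf_size_for(unsigned int mtu, int is_fcoe_ring)
	{
		/* The FCoE L2 ring ignores the netdev MTU and uses a fixed mini-jumbo MTU. */
		unsigned int base = is_fcoe_ring ? BNX2X_FCOE_MINI_JUMBO_MTU : mtu;

		return base + ETH_OVREHEAD + BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
	}

	int main(void)
	{
		printf("ethernet queue, mtu 1500: %u bytes\n", rx_buf_size_for(1500, 0));
		printf("ethernet queue, mtu 9000: %u bytes\n", rx_buf_size_for(9000, 0));
		printf("FCoE queue (mtu ignored): %u bytes\n", rx_buf_size_for(1500, 1));
		return 0;
	}

The point the sketch illustrates is the one the patch makes: once rx_buf_size lives in each bnx2x_fastpath rather than in the global bnx2x structure, different rings can size their buffers independently, which is what lets the FCoE ring use a fixed mini-jumbo MTU while the regular rings follow the netdev MTU.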