@@ -101,9 +101,9 @@
  * You can't change the ring sizes, but you can change where you place
  * them in the NIC onboard memory.
  */
-#define TG3_RX_RING_SIZE		512
+#define TG3_RX_STD_RING_SIZE(tp)	512
 #define TG3_DEF_RX_RING_PENDING		200
-#define TG3_RX_JUMBO_RING_SIZE		256
+#define TG3_RX_JMB_RING_SIZE(tp)	256
 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
 #define TG3_RSS_INDIR_TBL_SIZE		128
 
@@ -120,12 +120,12 @@
 #define TG3_TX_RING_SIZE		512
 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
 
-#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
-				 TG3_RX_RING_SIZE)
-#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
-				 TG3_RX_JUMBO_RING_SIZE)
-#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
-				   TG3_RX_RCB_RING_SIZE(tp))
+#define TG3_RX_STD_RING_BYTES(tp) \
+	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
+#define TG3_RX_JMB_RING_BYTES(tp) \
+	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
+#define TG3_RX_RCB_RING_BYTES(tp) \
+	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
@@ -143,11 +143,11 @@
 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
-#define TG3_RX_STD_BUFF_RING_SIZE \
-	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)
+#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
+	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
 
-#define TG3_RX_JMB_BUFF_RING_SIZE \
-	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
+	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
 
 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
  * that are at least dword aligned when used in PCIX mode.  The driver
@@ -4445,14 +4445,14 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 	src_map = NULL;
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
-		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
+		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 		desc = &tpr->rx_std[dest_idx];
 		map = &tpr->rx_std_buffers[dest_idx];
 		skb_size = tp->rx_pkt_map_sz;
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
-		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
+		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 		desc = &tpr->rx_jmb[dest_idx].std;
 		map = &tpr->rx_jmb_buffers[dest_idx];
 		skb_size = TG3_RX_JMB_MAP_SZ;
@@ -4507,7 +4507,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
-		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
+		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 		dest_desc = &dpr->rx_std[dest_idx];
 		dest_map = &dpr->rx_std_buffers[dest_idx];
 		src_desc = &spr->rx_std[src_idx];
@@ -4515,7 +4515,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
-		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
+		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 		dest_desc = &dpr->rx_jmb[dest_idx].std;
 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
 		src_desc = &spr->rx_jmb[src_idx].std;
@@ -4715,7 +4715,8 @@ next_pkt:
 		(*post_ptr)++;
 
 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
-			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+			tpr->rx_std_prod_idx = std_prod_idx &
+					       tp->rx_std_ring_mask;
 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 				     tpr->rx_std_prod_idx);
 			work_mask &= ~RXD_OPAQUE_RING_STD;
@@ -4739,13 +4740,14 @@ next_pkt_nopost:
 	/* Refill RX ring(s). */
 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
 		if (work_mask & RXD_OPAQUE_RING_STD) {
-			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+			tpr->rx_std_prod_idx = std_prod_idx &
+					       tp->rx_std_ring_mask;
 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 				     tpr->rx_std_prod_idx);
 		}
 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
-			tpr->rx_jmb_prod_idx = jmb_prod_idx %
-					       TG3_RX_JUMBO_RING_SIZE;
+			tpr->rx_jmb_prod_idx = jmb_prod_idx &
+					       tp->rx_jmb_ring_mask;
 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
 				     tpr->rx_jmb_prod_idx);
 		}
@@ -4756,8 +4758,8 @@ next_pkt_nopost:
 	 */
 	smp_wmb();
 
-	tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
-	tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
+	tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
+	tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
 
 	if (tnapi != &tp->napi[1])
 		napi_schedule(&tp->napi[1].napi);
@@ -4813,9 +4815,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 		if (spr->rx_std_cons_idx < src_prod_idx)
 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
 		else
-			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
+			cpycnt = tp->rx_std_ring_mask + 1 -
+				 spr->rx_std_cons_idx;
 
-		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
+		cpycnt = min(cpycnt,
+			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
 
 		si = spr->rx_std_cons_idx;
 		di = dpr->rx_std_prod_idx;
@@ -4849,10 +4853,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 			dbd->addr_lo = sbd->addr_lo;
 		}
 
-		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
-				       TG3_RX_RING_SIZE;
-		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
-				       TG3_RX_RING_SIZE;
+		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
+				       tp->rx_std_ring_mask;
+		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
+				       tp->rx_std_ring_mask;
 	}
 
 	while (1) {
@@ -4869,10 +4873,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 		if (spr->rx_jmb_cons_idx < src_prod_idx)
 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
 		else
-			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
+			cpycnt = tp->rx_jmb_ring_mask + 1 -
+				 spr->rx_jmb_cons_idx;
 
 		cpycnt = min(cpycnt,
-			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
+			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
 
 		si = spr->rx_jmb_cons_idx;
 		di = dpr->rx_jmb_prod_idx;
@@ -4906,10 +4911,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 			dbd->addr_lo = sbd->addr_lo;
 		}
 
-		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
-				       TG3_RX_JUMBO_RING_SIZE;
-		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
-				       TG3_RX_JUMBO_RING_SIZE;
+		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
+				       tp->rx_jmb_ring_mask;
+		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
+				       tp->rx_jmb_ring_mask;
 	}
 
 	return err;
@@ -6059,14 +6064,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 
 	if (tpr != &tp->napi[0].prodring) {
 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
-		     i = (i + 1) % TG3_RX_RING_SIZE)
+		     i = (i + 1) & tp->rx_std_ring_mask)
 			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
 					tp->rx_pkt_map_sz);
 
 		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
 			for (i = tpr->rx_jmb_cons_idx;
 			     i != tpr->rx_jmb_prod_idx;
-			     i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
+			     i = (i + 1) & tp->rx_jmb_ring_mask) {
 				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
 						TG3_RX_JMB_MAP_SZ);
 			}
@@ -6075,12 +6080,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 		return;
 	}
 
-	for (i = 0; i < TG3_RX_RING_SIZE; i++)
+	for (i = 0; i <= tp->rx_std_ring_mask; i++)
 		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
 				tp->rx_pkt_map_sz);
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
+		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
 			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
 					TG3_RX_JMB_MAP_SZ);
 	}
@@ -6104,15 +6109,16 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	tpr->rx_jmb_prod_idx = 0;
 
 	if (tpr != &tp->napi[0].prodring) {
-		memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
+		memset(&tpr->rx_std_buffers[0], 0,
+		       TG3_RX_STD_BUFF_RING_SIZE(tp));
 		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
 			memset(&tpr->rx_jmb_buffers[0], 0,
-			       TG3_RX_JMB_BUFF_RING_SIZE);
+			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
 		goto done;
 	}
 
 	/* Zero out all descriptors. */
-	memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
+	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
 
 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
 	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
@@ -6124,7 +6130,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	 * stuff once.  This works because the card does not
 	 * write into the rx buffer posting rings.
 	 */
-	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
+	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
 		struct tg3_rx_buffer_desc *rxd;
 
 		rxd = &tpr->rx_std[i];
@@ -6151,12 +6157,12 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
 		goto done;
 
-	memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
+	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
 
 	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
 		goto done;
 
-	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
 		struct tg3_rx_buffer_desc *rxd;
 
 		rxd = &tpr->rx_jmb[i].std;
@@ -6196,12 +6202,12 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
 	kfree(tpr->rx_jmb_buffers);
 	tpr->rx_jmb_buffers = NULL;
 	if (tpr->rx_std) {
-		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
+		pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
 				    tpr->rx_std, tpr->rx_std_mapping);
 		tpr->rx_std = NULL;
 	}
 	if (tpr->rx_jmb) {
-		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
+		pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp),
 				    tpr->rx_jmb, tpr->rx_jmb_mapping);
 		tpr->rx_jmb = NULL;
 	}
@@ -6210,23 +6216,24 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
 static int tg3_rx_prodring_init(struct tg3 *tp,
 				struct tg3_rx_prodring_set *tpr)
 {
-	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
+	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
+				      GFP_KERNEL);
 	if (!tpr->rx_std_buffers)
 		return -ENOMEM;
 
-	tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
+	tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
 					   &tpr->rx_std_mapping);
 	if (!tpr->rx_std)
 		goto err_out;
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
+		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
 					      GFP_KERNEL);
 		if (!tpr->rx_jmb_buffers)
 			goto err_out;
 
 		tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
-						   TG3_RX_JUMBO_RING_BYTES,
+						   TG3_RX_JMB_RING_BYTES(tp),
 						   &tpr->rx_jmb_mapping);
 		if (!tpr->rx_jmb)
 			goto err_out;
@@ -9854,10 +9861,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
+	ering->rx_max_pending = tp->rx_std_ring_mask;
 	ering->rx_mini_max_pending = 0;
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
-		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
+		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
 	else
 		ering->rx_jumbo_max_pending = 0;
 
@@ -9878,8 +9885,8 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 	struct tg3 *tp = netdev_priv(dev);
 	int i, irq_sync = 0, err = 0;
 
-	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
-	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
+	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
+	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
 	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
@@ -13597,7 +13604,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 #endif
 	}
 
-	tp->rx_std_max_post = TG3_RX_RING_SIZE;
+	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
+	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
+	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
 
 	/* Increment the rx prod index on the rx std ring by at most
 	 * 8 for these chips to workaround hw errata.
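
Note on the conversion pattern (an editor's sketch, not part of the patch): the ring sizes stay powers of two, so every "index % TG3_RX_*_RING_SIZE" above can become "index & tp->rx_*_ring_mask", with the masks precomputed as size - 1 in tg3_get_invariants() and full-ring loops written as "i <= mask". A minimal standalone illustration of the identity this relies on; RING_SIZE and RING_MASK are illustrative names, not from the driver:

#include <assert.h>

/* Illustrative stand-ins for TG3_RX_STD_RING_SIZE(tp) and the new
 * tp->rx_std_ring_mask field; any power-of-two size behaves the same. */
#define RING_SIZE	512u
#define RING_MASK	(RING_SIZE - 1)

int main(void)
{
	unsigned int idx;

	/* For a power-of-two ring size n, idx % n == idx & (n - 1).
	 * The masked form needs no divide and stays cheap even when the
	 * ring size becomes a per-device runtime value rather than a
	 * compile-time constant, which is what this patch prepares for. */
	for (idx = 0; idx < 4 * RING_SIZE; idx++)
		assert((idx % RING_SIZE) == (idx & RING_MASK));

	return 0;
}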