@@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
 	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
 	struct net_device *dev = cq->dev;
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
 	int done;
 
 	if (!priv->port_up)
@@ -355,8 +355,7 @@ err:
 	return ret;
 }
 
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *rx_ring)
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_filter *filter, *tmp;
 	LIST_HEAD(del_list);
@@ -1242,7 +1241,7 @@ static void mlx4_en_netpoll(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		spin_lock_irqsave(&cq->lock, flags);
 		napi_synchronize(&cq->napi);
 		mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1264,8 +1263,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-			i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
-			priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
+			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
 	}
 
 	priv->port_stats.tx_timeout++;
@@ -1305,7 +1304,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 
 	/* Setup cq moderation params */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		cq->moder_cnt = priv->rx_frames;
 		cq->moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1314,7 +1313,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	}
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		cq->moder_cnt = priv->tx_frames;
 		cq->moder_time = priv->tx_usecs;
 	}
@@ -1348,8 +1347,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
 		spin_lock_bh(&priv->stats_lock);
-		rx_packets = priv->rx_ring[ring].packets;
-		rx_bytes = priv->rx_ring[ring].bytes;
+		rx_packets = priv->rx_ring[ring]->packets;
+		rx_bytes = priv->rx_ring[ring]->bytes;
 		spin_unlock_bh(&priv->stats_lock);
 
 		rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1378,7 +1377,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
 		if (moder_time != priv->last_moder_time[ring]) {
 			priv->last_moder_time[ring] = moder_time;
-			cq = &priv->rx_cq[ring];
+			cq = priv->rx_cq[ring];
 			cq->moder_time = moder_time;
 			cq->moder_cnt = priv->rx_frames;
 			err = mlx4_en_set_cq_moder(priv, cq);
@@ -1501,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		return err;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 
 		mlx4_en_cq_init_lock(cq);
 
@@ -1519,7 +1518,7 @@ int mlx4_en_start_port(struct net_device *dev)
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
-		priv->rx_ring[i].cqn = cq->mcq.cqn;
+		priv->rx_ring[i]->cqn = cq->mcq.cqn;
 		++rx_index;
 	}
 
@@ -1545,7 +1544,7 @@ int mlx4_en_start_port(struct net_device *dev)
 	/* Configure tx cq's and rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		/* Configure cq */
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		err = mlx4_en_activate_cq(priv, cq, i);
 		if (err) {
 			en_err(priv, "Failed allocating Tx CQ\n");
@@ -1561,7 +1560,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		cq->buf->wqe_index = cpu_to_be16(0xffff);
 
 		/* Configure ring */
-		tx_ring = &priv->tx_ring[i];
+		tx_ring = priv->tx_ring[i];
 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
 					       i / priv->num_tx_rings_p_up);
 		if (err) {
@@ -1631,8 +1630,8 @@ int mlx4_en_start_port(struct net_device *dev)
 
 tx_err:
 	while (tx_index--) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
 	}
 	mlx4_en_destroy_drop_qp(priv);
 rss_err:
@@ -1641,9 +1640,9 @@ mac_err:
 	mlx4_en_put_qp(priv);
 cq_err:
 	while (rx_index--)
-		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
 	for (i = 0; i < priv->rx_ring_num; i++)
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 
 	return err; /* need to close devices */
 }
@@ -1739,13 +1738,13 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	/* Free TX Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
 	}
 	msleep(10);
 
 	for (i = 0; i < priv->tx_ring_num; i++)
-		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
 
 	/* Free RSS qps */
 	mlx4_en_release_rss_steer(priv);
@@ -1757,7 +1756,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	/* Free RX Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		struct mlx4_en_cq *cq = &priv->rx_cq[i];
+		struct mlx4_en_cq *cq = priv->rx_cq[i];
 
 		local_bh_disable();
 		while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1768,7 +1767,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
 			msleep(1);
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 		mlx4_en_deactivate_cq(priv, cq);
 	}
 }
@@ -1806,15 +1805,15 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_ring[i].bytes = 0;
-		priv->tx_ring[i].packets = 0;
-		priv->tx_ring[i].tx_csum = 0;
+		priv->tx_ring[i]->bytes = 0;
+		priv->tx_ring[i]->packets = 0;
+		priv->tx_ring[i]->tx_csum = 0;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_ring[i].bytes = 0;
-		priv->rx_ring[i].packets = 0;
-		priv->rx_ring[i].csum_ok = 0;
-		priv->rx_ring[i].csum_none = 0;
+		priv->rx_ring[i]->bytes = 0;
+		priv->rx_ring[i]->packets = 0;
+		priv->rx_ring[i]->csum_ok = 0;
+		priv->rx_ring[i]->csum_none = 0;
 	}
 }
 
@@ -1871,17 +1870,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 #endif
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		if (priv->tx_ring[i].tx_info)
+		if (priv->tx_ring && priv->tx_ring[i])
 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-		if (priv->tx_cq[i].buf)
+		if (priv->tx_cq && priv->tx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		if (priv->rx_ring[i].rx_info)
+		if (priv->rx_ring[i])
 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
 				priv->prof->rx_ring_size, priv->stride);
-		if (priv->rx_cq[i].buf)
+		if (priv->rx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
 	}
 
@@ -1937,6 +1936,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		if (priv->rx_ring[i])
+			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+						prof->rx_ring_size,
+						priv->stride);
+		if (priv->rx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+	}
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		if (priv->tx_ring[i])
+			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+		if (priv->tx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	}
 	return -ENOMEM;
 }
 
@@ -2230,13 +2243,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
 	priv->tx_ring_num = prof->tx_ring_num;
 
-	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
 				GFP_KERNEL);
 	if (!priv->tx_ring) {
 		err = -ENOMEM;
 		goto out;
 	}
-	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
+	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
 			      GFP_KERNEL);
 	if (!priv->tx_cq) {
 		err = -ENOMEM;