@@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
 	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
 	struct net_device *dev = cq->dev;
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
 	int done;
 
 	if (!priv->port_up)
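Note on the recurring pattern above: throughout this patch, priv->rx_ring, priv->tx_ring, priv->rx_cq and priv->tx_cq change from arrays of structures to arrays of pointers, so accesses change from &arr[i] / arr[i].field to arr[i] / arr[i]->field, and each element is allocated separately (which is what makes the per-ring NUMA placement further down possible). A minimal standalone sketch of the two layouts, with illustrative type and field names rather than the driver's:

    #include <stdio.h>
    #include <stdlib.h>

    struct ring { int cqn; };

    int main(void)
    {
        enum { N = 4 };

        /* Old layout: one contiguous array of structs. */
        struct ring *flat = calloc(N, sizeof(struct ring));
        flat[1].cqn = 42;               /* access by value: arr[i].field */

        /* New layout: array of pointers, each element allocated
         * separately, so each ring can live on its own memory node. */
        struct ring **vec = calloc(N, sizeof(struct ring *));
        for (int i = 0; i < N; i++)
            vec[i] = calloc(1, sizeof(struct ring));
        vec[1]->cqn = 42;               /* access by pointer: arr[i]->field */

        printf("%d %d\n", flat[1].cqn, vec[1]->cqn);

        for (int i = 0; i < N; i++)
            free(vec[i]);
        free(vec);
        free(flat);
        return 0;
    }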
@@ -102,6 +102,7 @@ struct mlx4_en_filter {
 	struct list_head next;
 	struct work_struct work;
 
+	u8     ip_proto;
 	__be32 src_ip;
 	__be32 dst_ip;
 	__be16 src_port;
@@ -120,14 +121,26 @@ struct mlx4_en_filter {
 
 static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
 
+static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
+{
+	switch (ip_proto) {
+	case IPPROTO_UDP:
+		return MLX4_NET_TRANS_RULE_ID_UDP;
+	case IPPROTO_TCP:
+		return MLX4_NET_TRANS_RULE_ID_TCP;
+	default:
+		return MLX4_NET_TRANS_RULE_NUM; /* unsupported protocol */
+	}
+}
+
 static void mlx4_en_filter_work(struct work_struct *work)
 {
 	struct mlx4_en_filter *filter = container_of(work,
 						     struct mlx4_en_filter,
 						     work);
 	struct mlx4_en_priv *priv = filter->priv;
-	struct mlx4_spec_list spec_tcp = {
-		.id = MLX4_NET_TRANS_RULE_ID_TCP,
+	struct mlx4_spec_list spec_tcp_udp = {
+		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
 		{
 			.tcp_udp = {
 				.dst_port = filter->dst_port,
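The helper maps the IP protocol number onto the firmware's flow-steering rule id, using the enum's terminating value as an "unsupported" sentinel. Returning a negative errno and testing id < 0 would be unreliable here: a C enum whose enumerators are all non-negative typically gets an unsigned underlying type, making the comparison always false. A standalone model of the sentinel pattern (names mirror but are not the driver's):

    #include <stdio.h>

    #define IPPROTO_TCP 6
    #define IPPROTO_UDP 17

    enum rule_id { RULE_ID_TCP, RULE_ID_UDP, RULE_NUM };

    static enum rule_id proto_to_rule_id(unsigned char ip_proto)
    {
        switch (ip_proto) {
        case IPPROTO_UDP:
            return RULE_ID_UDP;
        case IPPROTO_TCP:
            return RULE_ID_TCP;
        default:
            return RULE_NUM;    /* unsupported protocol */
        }
    }

    int main(void)
    {
        unsigned char protos[] = { 6, 17, 1 /* ICMP */ };

        for (unsigned i = 0; i < sizeof(protos); i++) {
            enum rule_id id = proto_to_rule_id(protos[i]);

            if (id >= RULE_NUM) /* safe even if the enum is unsigned */
                printf("proto %u: unsupported\n", protos[i]);
            else
                printf("proto %u: rule id %d\n", protos[i], id);
        }
        return 0;
    }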
@@ -163,9 +176,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
 	int rc;
 	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
+	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
+		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
+			filter->ip_proto);
+		goto ignore;
+	}
 	list_add_tail(&spec_eth.list, &rule.list);
 	list_add_tail(&spec_ip.list, &rule.list);
-	list_add_tail(&spec_tcp.list, &rule.list);
+	list_add_tail(&spec_tcp_udp.list, &rule.list);
 
 	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
 	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
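The steering rule is assembled as a chain of match specifications (L2 Ethernet, L3 IPv4, L4 TCP/UDP) linked onto rule.list before the flow is attached. A standalone sketch of that chaining with a minimal circular doubly-linked list (the kernel's struct list_head works the same way; spec contents are elided):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *prev, *next; };

    static void list_init(struct list_head *h) { h->prev = h->next = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
    }

    struct spec { const char *name; struct list_head list; };

    int main(void)
    {
        struct list_head rule;
        struct spec eth = { "eth" }, ip = { "ipv4" }, l4 = { "tcp/udp" };

        list_init(&rule);
        list_add_tail(&eth.list, &rule);    /* L2 match */
        list_add_tail(&ip.list, &rule);     /* L3 match */
        list_add_tail(&l4.list, &rule);     /* L4 match */

        /* Walk the chain, recovering each spec from its embedded node. */
        for (struct list_head *p = rule.next; p != &rule; p = p->next) {
            struct spec *s = (struct spec *)((char *)p -
                             offsetof(struct spec, list));
            printf("spec: %s\n", s->name);
        }
        return 0;
    }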
@@ -183,6 +201,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
 	if (rc)
 		en_err(priv, "Error attaching flow. err = %d\n", rc);
 
+ignore:
 	mlx4_en_filter_rfs_expire(priv);
 
 	filter->activated = 1;
@@ -206,8 +225,8 @@ filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
 
 static struct mlx4_en_filter *
 mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
-		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
-		     u32 flow_id)
+		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
+		     __be16 dst_port, u32 flow_id)
 {
 	struct mlx4_en_filter *filter = NULL;
 
@@ -221,6 +240,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
 
 	filter->src_ip = src_ip;
 	filter->dst_ip = dst_ip;
+	filter->ip_proto = ip_proto;
 	filter->src_port = src_port;
 	filter->dst_port = dst_port;
 
@@ -252,7 +272,7 @@ static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
 
 static inline struct mlx4_en_filter *
 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
-		    __be16 src_port, __be16 dst_port)
+		    u8 ip_proto, __be16 src_port, __be16 dst_port)
 {
 	struct mlx4_en_filter *filter;
 	struct mlx4_en_filter *ret = NULL;
@@ -263,6 +283,7 @@ mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
 			    filter_chain) {
 		if (filter->src_ip == src_ip &&
 		    filter->dst_ip == dst_ip &&
+		    filter->ip_proto == ip_proto &&
 		    filter->src_port == src_port &&
 		    filter->dst_port == dst_port) {
 			ret = filter;
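Bucket collisions are resolved by comparing the full tuple, so adding ip_proto to the comparison keeps a TCP flow and a UDP flow with identical addresses and ports in distinct filters even when they land in the same hash bucket. A standalone sketch of the compare step (types and names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct filter {
        uint32_t src_ip, dst_ip;
        uint16_t src_port, dst_port;
        uint8_t  ip_proto;
    };

    static bool filter_match(const struct filter *f,
                             uint32_t src_ip, uint32_t dst_ip,
                             uint8_t ip_proto,
                             uint16_t src_port, uint16_t dst_port)
    {
        return f->src_ip == src_ip && f->dst_ip == dst_ip &&
               f->ip_proto == ip_proto &&       /* the new field */
               f->src_port == src_port && f->dst_port == dst_port;
    }

    int main(void)
    {
        struct filter tcp_f = { 1, 2, 80, 8080, 6 /* TCP */ };

        /* Same addresses and ports, different protocol: must NOT match. */
        printf("tcp filter vs udp key: %d\n",
               filter_match(&tcp_f, 1, 2, 17, 80, 8080));
        printf("tcp filter vs tcp key: %d\n",
               filter_match(&tcp_f, 1, 2, 6, 80, 8080));
        return 0;
    }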
@@ -281,6 +302,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	struct mlx4_en_filter *filter;
 	const struct iphdr *ip;
 	const __be16 *ports;
+	u8 ip_proto;
 	__be32 src_ip;
 	__be32 dst_ip;
 	__be16 src_port;
@@ -295,18 +317,19 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 	if (ip_is_fragment(ip))
 		return -EPROTONOSUPPORT;
 
+	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
+		return -EPROTONOSUPPORT;
 	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
+	ip_proto = ip->protocol;
 	src_ip = ip->saddr;
 	dst_ip = ip->daddr;
 	src_port = ports[0];
 	dst_port = ports[1];
 
-	if (ip->protocol != IPPROTO_TCP)
-		return -EPROTONOSUPPORT;
-
 	spin_lock_bh(&priv->filters_lock);
-	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
+				     src_port, dst_port);
 	if (filter) {
 		if (filter->rxq_index == rxq_index)
 			goto out;
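One port-extraction path can serve both protocols because TCP and UDP both place the source and destination ports in the first four bytes of the L4 header, which starts at nhoff + 4 * ihl. A standalone sketch of that header walk over a hand-built IPv4 + UDP stub (field offsets are per RFC 791; buffer contents are made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t pkt[28] = {0};
        size_t nhoff = 0;               /* network header offset */

        pkt[0] = 0x45;                  /* version 4, ihl = 5 words */
        pkt[9] = 17;                    /* protocol = UDP */
        /* ports in network byte order: src 53, dst 4096 */
        uint8_t l4[4] = { 0x00, 0x35, 0x10, 0x00 };
        memcpy(pkt + 20, l4, sizeof(l4));

        uint8_t ihl = pkt[nhoff] & 0x0f;
        uint8_t ip_proto = pkt[nhoff + 9];
        const uint8_t *ports = pkt + nhoff + 4 * ihl;

        uint16_t src_port = (uint16_t)(ports[0] << 8 | ports[1]);
        uint16_t dst_port = (uint16_t)(ports[2] << 8 | ports[3]);

        printf("proto=%u src=%u dst=%u\n", ip_proto, src_port, dst_port);
        return 0;
    }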
@@ -314,7 +337,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 		filter->rxq_index = rxq_index;
 	} else {
 		filter = mlx4_en_filter_alloc(priv, rxq_index,
-					      src_ip, dst_ip,
+					      src_ip, dst_ip, ip_proto,
 					      src_port, dst_port, flow_id);
 		if (!filter) {
 			ret = -ENOMEM;
@@ -332,8 +355,7 @@ err:
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
|
|
|
- struct mlx4_en_rx_ring *rx_ring)
|
|
|
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
|
|
|
{
|
|
|
struct mlx4_en_filter *filter, *tmp;
|
|
|
LIST_HEAD(del_list);
|
|
@@ -1219,7 +1241,7 @@ static void mlx4_en_netpoll(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		spin_lock_irqsave(&cq->lock, flags);
 		napi_synchronize(&cq->napi);
 		mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1241,8 +1263,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-			i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
-			priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
+			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
 	}
 
 	priv->port_stats.tx_timeout++;
@@ -1282,7 +1304,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 
 	/* Setup cq moderation params */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		cq->moder_cnt = priv->rx_frames;
 		cq->moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1291,7 +1313,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	}
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		cq->moder_cnt = priv->tx_frames;
 		cq->moder_time = priv->tx_usecs;
 	}
@@ -1325,8 +1347,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
 		spin_lock_bh(&priv->stats_lock);
-		rx_packets = priv->rx_ring[ring].packets;
-		rx_bytes = priv->rx_ring[ring].bytes;
+		rx_packets = priv->rx_ring[ring]->packets;
+		rx_bytes = priv->rx_ring[ring]->bytes;
 		spin_unlock_bh(&priv->stats_lock);
 
 		rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1355,7 +1377,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
 		if (moder_time != priv->last_moder_time[ring]) {
 			priv->last_moder_time[ring] = moder_time;
-			cq = &priv->rx_cq[ring];
+			cq = priv->rx_cq[ring];
 			cq->moder_time = moder_time;
 			cq->moder_cnt = priv->rx_frames;
 			err = mlx4_en_set_cq_moder(priv, cq);
@@ -1478,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		return err;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 
 		mlx4_en_cq_init_lock(cq);
 
@@ -1496,7 +1518,7 @@ int mlx4_en_start_port(struct net_device *dev)
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
-		priv->rx_ring[i].cqn = cq->mcq.cqn;
+		priv->rx_ring[i]->cqn = cq->mcq.cqn;
 		++rx_index;
 	}
 
@@ -1522,7 +1544,7 @@ int mlx4_en_start_port(struct net_device *dev)
 	/* Configure tx cq's and rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		/* Configure cq */
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		err = mlx4_en_activate_cq(priv, cq, i);
 		if (err) {
 			en_err(priv, "Failed allocating Tx CQ\n");
@@ -1538,7 +1560,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		cq->buf->wqe_index = cpu_to_be16(0xffff);
 
 		/* Configure ring */
-		tx_ring = &priv->tx_ring[i];
+		tx_ring = priv->tx_ring[i];
 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
 					       i / priv->num_tx_rings_p_up);
 		if (err) {
@@ -1608,8 +1630,8 @@ int mlx4_en_start_port(struct net_device *dev)
 
 tx_err:
 	while (tx_index--) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
 	}
 	mlx4_en_destroy_drop_qp(priv);
 rss_err:
@@ -1618,9 +1640,9 @@ mac_err:
 	mlx4_en_put_qp(priv);
 cq_err:
 	while (rx_index--)
-		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
 	for (i = 0; i < priv->rx_ring_num; i++)
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 
 	return err; /* need to close devices */
 }
@@ -1716,13 +1738,13 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	/* Free TX Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
 	}
 	msleep(10);
 
 	for (i = 0; i < priv->tx_ring_num; i++)
-		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
 
 	/* Free RSS qps */
 	mlx4_en_release_rss_steer(priv);
@@ -1734,7 +1756,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	/* Free RX Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		struct mlx4_en_cq *cq = &priv->rx_cq[i];
+		struct mlx4_en_cq *cq = priv->rx_cq[i];
 
 		local_bh_disable();
 		while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1745,7 +1767,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
 			msleep(1);
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 		mlx4_en_deactivate_cq(priv, cq);
 	}
 }
@@ -1783,15 +1805,15 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_ring[i].bytes = 0;
-		priv->tx_ring[i].packets = 0;
-		priv->tx_ring[i].tx_csum = 0;
+		priv->tx_ring[i]->bytes = 0;
+		priv->tx_ring[i]->packets = 0;
+		priv->tx_ring[i]->tx_csum = 0;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_ring[i].bytes = 0;
-		priv->rx_ring[i].packets = 0;
-		priv->rx_ring[i].csum_ok = 0;
-		priv->rx_ring[i].csum_none = 0;
+		priv->rx_ring[i]->bytes = 0;
+		priv->rx_ring[i]->packets = 0;
+		priv->rx_ring[i]->csum_ok = 0;
+		priv->rx_ring[i]->csum_none = 0;
 	}
 }
 
@@ -1848,17 +1870,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 #endif
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		if (priv->tx_ring[i].tx_info)
+		if (priv->tx_ring && priv->tx_ring[i])
 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-		if (priv->tx_cq[i].buf)
+		if (priv->tx_cq && priv->tx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		if (priv->rx_ring[i].rx_info)
+		if (priv->rx_ring[i])
 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
 				priv->prof->rx_ring_size, priv->stride);
-		if (priv->rx_cq[i].buf)
+		if (priv->rx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
 	}
 
@@ -1873,6 +1895,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
 	int err;
+	int node;
 
 	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
 	if (err) {
@@ -1882,23 +1905,26 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 	/* Create tx Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
+		node = cpu_to_node(i % num_online_cpus());
 		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
-				      prof->tx_ring_size, i, TX))
+				      prof->tx_ring_size, i, TX, node))
 			goto err;
 
 		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
-					   prof->tx_ring_size, TXBB_SIZE))
+					   prof->tx_ring_size, TXBB_SIZE, node))
 			goto err;
 	}
 
 	/* Create rx Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
+		node = cpu_to_node(i % num_online_cpus());
 		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
-				      prof->rx_ring_size, i, RX))
+				      prof->rx_ring_size, i, RX, node))
 			goto err;
 
 		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
-					   prof->rx_ring_size, priv->stride))
+					   prof->rx_ring_size, priv->stride,
+					   node))
 			goto err;
 	}
 
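Each ring and CQ is now created on the memory node of the CPU expected to service it: ring i is mapped round-robin onto CPU (i % num_online_cpus()), and that CPU's node is passed down so the per-ring allocations can use node-aware allocators. A standalone sketch of the index arithmetic, assuming a made-up two-node box with four CPUs (in the kernel the table comes from cpu_to_node()):

    #include <stdio.h>

    /* Illustrative topology: CPUs 0-1 on node 0, CPUs 2-3 on node 1. */
    static int cpu_to_node(int cpu)
    {
        static const int node_of_cpu[4] = { 0, 0, 1, 1 };
        return node_of_cpu[cpu % 4];
    }

    int main(void)
    {
        int num_online_cpus = 4;

        for (int i = 0; i < 8; i++) {   /* e.g. 8 rx rings */
            int node = cpu_to_node(i % num_online_cpus);
            printf("ring %d -> node %d\n", i, node);
        }
        return 0;
    }

Spreading rings this way keeps a ring's descriptors in memory local to the CPU that polls it, which matters on multi-socket machines.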
@@ -1914,6 +1940,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		if (priv->rx_ring[i])
+			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+						prof->rx_ring_size,
+						priv->stride);
+		if (priv->rx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+	}
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		if (priv->tx_ring[i])
+			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+		if (priv->tx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	}
 	return -ENOMEM;
 }
 
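With per-element allocation, the err: path must unwind partial progress: only the slots that were actually populated are destroyed, which the NULL checks make safe at any point of failure. Note also that the destroy helpers take the address of the slot (&priv->rx_ring[i]), so they can clear it. A standalone sketch of this unwind pattern (names are illustrative):

    #include <stdlib.h>

    struct ring { int dummy; };

    static int create_ring(struct ring **pring)
    {
        *pring = calloc(1, sizeof(**pring));
        return *pring ? 0 : -1;
    }

    /* Takes the slot's address so the slot is NULLed after freeing,
     * keeping the table safe to re-scan. */
    static void destroy_ring(struct ring **pring)
    {
        free(*pring);
        *pring = NULL;
    }

    int main(void)
    {
        struct ring *rings[8] = { NULL };
        int i;

        for (i = 0; i < 8; i++)
            if (create_ring(&rings[i]))
                goto err;
        return 0;

    err:
        for (i = 0; i < 8; i++)
            if (rings[i])       /* skip slots that were never created */
                destroy_ring(&rings[i]);
        return 1;
    }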
@@ -2207,13 +2247,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
 	priv->tx_ring_num = prof->tx_ring_num;
 
-	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
 			GFP_KERNEL);
 	if (!priv->tx_ring) {
 		err = -ENOMEM;
 		goto out;
 	}
-	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
 			GFP_KERNEL);
 	if (!priv->tx_cq) {
 		err = -ENOMEM;
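Since the tables now hold pointers, the containing arrays must be sized with sizeof(struct ... *) rather than sizeof(struct ...). The old expression would still compile and merely over-allocate (one full struct per slot instead of one pointer), which is why this class of mistake is easy to miss. A standalone sketch; the sizeof(*arr) idiom shown at the end is a general defensive style, not something this patch uses:

    #include <stdio.h>

    struct big_ring { char payload[4096]; };

    int main(void)
    {
        struct big_ring **rings = NULL;

        printf("per-slot, struct:  %zu bytes\n", sizeof(struct big_ring));
        printf("per-slot, pointer: %zu bytes\n", sizeof(struct big_ring *));
        /* sizeof(*rings) == sizeof(struct big_ring *): it tracks the
         * declaration even if the element type changes later. */
        printf("sizeof(*rings):    %zu bytes\n", sizeof(*rings));
        return 0;
    }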