@@ -55,17 +55,20 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                            struct mlx4_en_tx_ring **pring, int qpn, u32 size,
-                           u16 stride)
+                           u16 stride, int node)
 {
         struct mlx4_en_dev *mdev = priv->mdev;
         struct mlx4_en_tx_ring *ring;
         int tmp;
         int err;
 
-        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+        ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
         if (!ring) {
-                en_err(priv, "Failed allocating TX ring\n");
-                return -ENOMEM;
+                ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+                if (!ring) {
+                        en_err(priv, "Failed allocating TX ring\n");
+                        return -ENOMEM;
+                }
         }
 
         ring->size = size;
@@ -75,24 +78,33 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
         inline_thold = min(inline_thold, MAX_INLINE);
 
         tmp = size * sizeof(struct mlx4_en_tx_info);
-        ring->tx_info = vmalloc(tmp);
+        ring->tx_info = vmalloc_node(tmp, node);
         if (!ring->tx_info) {
-                err = -ENOMEM;
-                goto err_ring;
+                ring->tx_info = vmalloc(tmp);
+                if (!ring->tx_info) {
+                        err = -ENOMEM;
+                        goto err_ring;
+                }
         }
 
         en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
                ring->tx_info, tmp);
 
-        ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+        ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
         if (!ring->bounce_buf) {
-                err = -ENOMEM;
-                goto err_info;
+                ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+                if (!ring->bounce_buf) {
+                        err = -ENOMEM;
+                        goto err_info;
+                }
         }
         ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
 
+        /* Allocate HW buffers on provided NUMA node */
+        set_dev_node(&mdev->dev->pdev->dev, node);
         err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
                                  2 * PAGE_SIZE);
+        set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
         if (err) {
                 en_err(priv, "Failed allocating hwq resources\n");
                 goto err_bounce;
@@ -118,7 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
         }
         ring->qp.event = mlx4_en_sqp_event;
 
-        err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+        err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
         if (err) {
                 en_dbg(DRV, priv, "working without blueflame (%d)", err);
                 ring->bf.uar = &mdev->priv_uar;