@@ -93,13 +93,17 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
 	kfree(icm);
 }
 
-static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
+				gfp_t gfp_mask, int node)
 {
 	struct page *page;
 
-	page = alloc_pages(gfp_mask, order);
-	if (!page)
-		return -ENOMEM;
+	page = alloc_pages_node(node, gfp_mask, order);
+	if (!page) {
+		page = alloc_pages(gfp_mask, order);
+		if (!page)
+			return -ENOMEM;
+	}
 
 	sg_set_page(mem, page, PAGE_SIZE << order, 0);
 	return 0;
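
The first hunk gives mlx4_alloc_icm_pages() a NUMA node parameter and tries alloc_pages_node() before falling back to plain alloc_pages(): a remote-node allocation is still better than failing with -ENOMEM. A minimal kernel-style sketch of the same preferred-node-then-fallback pattern, factored into a helper (the name alloc_pages_prefer_node is hypothetical, not part of this patch):

#include <linux/gfp.h>
#include <linux/mm_types.h>

/* Hypothetical helper: try the requested NUMA node first, then any node. */
static struct page *alloc_pages_prefer_node(int node, gfp_t gfp_mask,
                                            unsigned int order)
{
        struct page *page;

        page = alloc_pages_node(node, gfp_mask, order); /* local node first */
        if (page)
                return page;

        return alloc_pages(gfp_mask, order);    /* any node beats -ENOMEM */
}
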
@@ -130,9 +134,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 	/* We use sg_set_buf for coherent allocs, which assumes low memory */
 	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
 
-	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-	if (!icm)
-		return NULL;
+	icm = kmalloc_node(sizeof(*icm),
+			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
+			   dev->numa_node);
+	if (!icm) {
+		icm = kmalloc(sizeof(*icm),
+			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+		if (!icm)
+			return NULL;
+	}
 
 	icm->refcount = 0;
 	INIT_LIST_HEAD(&icm->chunk_list);
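
The second hunk applies the same policy to the struct mlx4_icm descriptor: kmalloc_node() on dev->numa_node first, plain kmalloc() as the fallback, with __GFP_HIGHMEM and __GFP_NOWARN masked off exactly as before. The same retry is open-coded again for the chunk descriptors in the next hunk; a tiny helper could factor it out. A sketch (kmalloc_prefer_node is a hypothetical name, not an existing kernel API):

#include <linux/slab.h>

/* Hypothetical helper: node-local slab allocation with any-node fallback. */
static void *kmalloc_prefer_node(size_t size, gfp_t flags, int node)
{
        void *p = kmalloc_node(size, flags, node);

        return p ? p : kmalloc(size, flags);
}
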
@@ -141,10 +151,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 
 	while (npages > 0) {
 		if (!chunk) {
-			chunk = kmalloc(sizeof *chunk,
-					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-			if (!chunk)
-				goto fail;
+			chunk = kmalloc_node(sizeof(*chunk),
+					     gfp_mask & ~(__GFP_HIGHMEM |
+							  __GFP_NOWARN),
+					     dev->numa_node);
+			if (!chunk) {
+				chunk = kmalloc(sizeof(*chunk),
+						gfp_mask & ~(__GFP_HIGHMEM |
+							     __GFP_NOWARN));
+				if (!chunk)
+					goto fail;
+			}
 
 			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
 			chunk->npages = 0;
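
The third hunk repeats the fallback dance for each struct mlx4_icm_chunk. With the hypothetical kmalloc_prefer_node() helper sketched above, both call sites would collapse to a single call, e.g.:

        icm = kmalloc_prefer_node(sizeof(*icm),
                                  gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
                                  dev->numa_node);
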
@@ -161,7 +178,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 						      cur_order, gfp_mask);
 		else
 			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
-						   cur_order, gfp_mask);
+						   cur_order, gfp_mask,
+						   dev->numa_node);
 
 		if (ret) {
 			if (--cur_order < 0)
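
The last hunk threads dev->numa_node into the non-coherent mlx4_alloc_icm_pages() call; the coherent branch is left untouched, since the DMA API takes the device and can already account for its locality. To experiment with the same preferred-node-then-fallback policy outside the kernel, a userspace analogue with libnuma could look like the sketch below (assumes libnuma headers are installed; build with `cc demo.c -lnuma`; all names are illustrative, none come from the mlx4 driver). Note the bookkeeping the kernel code avoids: numa_alloc_onnode() memory must be released with numa_free(), malloc() memory with free(), so the caller has to remember which allocator succeeded.

#include <numa.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node_buf {
        void   *p;
        size_t  size;
        int     on_node;        /* 1 if numa_alloc_onnode() succeeded */
};

static int node_buf_alloc(struct node_buf *buf, size_t size, int node)
{
        buf->size = size;
        buf->on_node = 0;
        buf->p = NULL;

        if (numa_available() != -1) {
                buf->p = numa_alloc_onnode(size, node); /* local node first */
                if (buf->p) {
                        buf->on_node = 1;
                        return 0;
                }
        }
        buf->p = malloc(size);  /* fallback: any node beats failing */
        return buf->p ? 0 : -1;
}

static void node_buf_free(struct node_buf *buf)
{
        if (!buf->p)
                return;
        if (buf->on_node)
                numa_free(buf->p, buf->size);
        else
                free(buf->p);
        buf->p = NULL;
}

int main(void)
{
        struct node_buf buf;

        if (node_buf_alloc(&buf, 1 << 20, 0) < 0)       /* node 0: example */
                return 1;
        memset(buf.p, 0, buf.size);
        printf("allocated %zu bytes, on requested node: %d\n",
               buf.size, buf.on_node);
        node_buf_free(&buf);
        return 0;
}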