@@ -1534,6 +1534,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node, void *caller)
 {
+	const int order = 0;
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
 	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
@@ -1560,11 +1561,12 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	for (i = 0; i < area->nr_pages; i++) {
 		struct page *page;
+		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
 
 		if (node < 0)
-			page = alloc_page(gfp_mask);
+			page = alloc_page(tmp_mask);
 		else
-			page = alloc_pages_node(node, gfp_mask, 0);
+			page = alloc_pages_node(node, tmp_mask, order);
 
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vunmap() */
@@ -1579,6 +1581,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	return area->addr;
 
 fail:
+	warn_alloc_failed(gfp_mask, order, "vmalloc: allocation failure, "
+			  "allocated %ld of %ld bytes\n",
+			  (area->nr_pages*PAGE_SIZE), area->size);
 	vfree(area->addr);
 	return NULL;
 }
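
The effect of the change: each per-page allocation is tried with __GFP_NOWARN, so the page allocator stays quiet on individual failures, and a single warn_alloc_failed() call at the fail: label reports how much of the request was satisfied. Note that the report passes the caller's original gfp_mask rather than tmp_mask, so a caller that itself asked for __GFP_NOWARN still gets no message (warn_alloc_failed() returns early when __GFP_NOWARN is set). Below is a minimal user-space sketch of the same pattern, with malloc() and fprintf() standing in for alloc_page()/alloc_pages_node() and warn_alloc_failed(); PAGE_SIZE here is a local stand-in, not the kernel macro.

/*
 * Sketch only: keep each per-page attempt quiet, then emit one
 * consolidated "allocated X of Y bytes" report on failure and
 * unwind whatever was already allocated.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static void **alloc_area_pages(unsigned long nr_pages)
{
	void **pages = calloc(nr_pages, sizeof(*pages));
	unsigned long i;

	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = malloc(PAGE_SIZE);	/* per-page attempt, no noise */
		if (!pages[i])
			goto fail;
	}
	return pages;

fail:
	/* one consolidated report instead of a warning per failed page */
	fprintf(stderr, "vmalloc: allocation failure, allocated %lu of %lu bytes\n",
		i * PAGE_SIZE, nr_pages * PAGE_SIZE);
	while (i--)
		free(pages[i]);		/* free the i pages we did get */
	free(pages);
	return NULL;
}

int main(void)
{
	unsigned long i;
	void **pages = alloc_area_pages(4);

	if (pages) {
		for (i = 0; i < 4; i++)
			free(pages[i]);
		free(pages);
	}
	return 0;
}

The swap is purely diagnostic: failure still unwinds through vfree() exactly as before, but the log gains the "allocated X of Y bytes" detail in place of one generic page-allocation warning per failed page.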