@@ -28,6 +28,7 @@
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
+#include <asm/shmparam.h>
 
 
 /*** Page table manipulation functions ***/
@@ -1155,12 +1156,11 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
-		unsigned long flags, unsigned long start, unsigned long end,
-		int node, gfp_t gfp_mask, void *caller)
+		unsigned long align, unsigned long flags, unsigned long start,
+		unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
 	static struct vmap_area *va;
 	struct vm_struct *area;
-	unsigned long align = 1;
 
 	BUG_ON(in_interrupt());
 	if (flags & VM_IOREMAP) {
@@ -1200,7 +1200,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
				  __builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -1209,7 +1209,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       void *caller)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
				  caller);
 }
 
@@ -1224,22 +1224,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  -1, GFP_KERNEL, __builtin_return_address(0));
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  -1, GFP_KERNEL, caller);
 }
 
 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
					int node, gfp_t gfp_mask)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-				  gfp_mask, __builtin_return_address(0));
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+				  node, gfp_mask, __builtin_return_address(0));
 }
 
 static struct vm_struct *find_vm_area(const void *addr)
@@ -1402,7 +1402,8 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
@@ -1416,7 +1417,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+		pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
@@ -1475,6 +1476,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 /**
  *	__vmalloc_node  -  allocate virtually contiguous memory
  *	@size:		allocation size
+ *	@align:		desired alignment
  *	@gfp_mask:	flags for the page level allocator
  *	@prot:		protection mask for the allocated pages
  *	@node:		node to use for allocation or -1
@@ -1484,8 +1486,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  *	allocator with @gfp_mask flags.  Map them into contiguous
  *	kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller)
 {
	struct vm_struct *area;
	void *addr;
@@ -1495,8 +1498,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		return NULL;
 
-	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
-				  node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
+				  VMALLOC_END, node, gfp_mask, caller);
 
	if (!area)
		return NULL;
@@ -1515,7 +1518,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_node(size, gfp_mask, prot, -1,
+	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -1531,7 +1534,7 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
@@ -1548,7 +1551,8 @@ void *vmalloc_user(unsigned long size)
	struct vm_struct *area;
	void *ret;
 
-	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+	ret = __vmalloc_node(size, SHMLBA,
+			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
@@ -1571,7 +1575,7 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
			      node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
@@ -1594,7 +1598,7 @@ EXPORT_SYMBOL(vmalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      -1, __builtin_return_address(0));
 }
 
@@ -1615,7 +1619,7 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
@@ -1632,7 +1636,7 @@ void *vmalloc_32_user(unsigned long size)
	struct vm_struct *area;
	void *ret;
 
-	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
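
For context, the user-visible effect of this change is that vmalloc_user() now
passes SHMLBA as the alignment, so the returned base kernel virtual address
lands on the architecture's cache-color boundary (important for VIPT caches
when the area is later mapped into userspace). Below is a minimal, hypothetical
demo module sketching how that guarantee could be checked; the module name and
allocation size are made up for the example and are not part of the patch.

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/shmparam.h>

/* Hypothetical demo, not part of the patch: verify that vmalloc_user()
 * buffers now start on an SHMLBA boundary. */
static int __init shmlba_align_demo_init(void)
{
	void *buf = vmalloc_user(16 * PAGE_SIZE);

	if (!buf)
		return -ENOMEM;

	/* With the align argument set to SHMLBA, the low bits below
	 * SHMLBA must be clear in the returned address. */
	WARN_ON((unsigned long)buf & (SHMLBA - 1));

	vfree(buf);
	return 0;
}

static void __exit shmlba_align_demo_exit(void)
{
}

module_init(shmlba_align_demo_init);
module_exit(shmlba_align_demo_exit);
MODULE_LICENSE("GPL");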