@@ -29,6 +29,7 @@
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
+#include <asm/shmparam.h>
 
 
 /*** Page table manipulation functions ***/
@@ -1156,12 +1157,11 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
-		unsigned long flags, unsigned long start, unsigned long end,
-		int node, gfp_t gfp_mask, void *caller)
+		unsigned long align, unsigned long flags, unsigned long start,
+		unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
 	static struct vmap_area *va;
 	struct vm_struct *area;
-	unsigned long align = 1;
 
 	BUG_ON(in_interrupt());
 	if (flags & VM_IOREMAP) {
@@ -1201,7 +1201,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
 				  __builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -1210,7 +1210,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 				       unsigned long start, unsigned long end,
 				       void *caller)
 {
-	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
 				  caller);
 }
 
@@ -1225,22 +1225,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  -1, GFP_KERNEL, __builtin_return_address(0));
 }
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				     void *caller)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  -1, GFP_KERNEL, caller);
 }
 
 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
 				   int node, gfp_t gfp_mask)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-				  gfp_mask, __builtin_return_address(0));
+	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+				  node, gfp_mask, __builtin_return_address(0));
 }
 
 static struct vm_struct *find_vm_area(const void *addr)
@@ -1403,7 +1403,8 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node, void *caller)
@@ -1417,7 +1418,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+		pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
 				PAGE_KERNEL, node, caller);
 		area->flags |= VM_VPAGES;
 	} else {
@@ -1476,6 +1477,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 /**
  *	__vmalloc_node  -  allocate virtually contiguous memory
  *	@size:		allocation size
+ *	@align:		desired alignment
  *	@gfp_mask:	flags for the page level allocator
  *	@prot:		protection mask for the allocated pages
  *	@node:		node to use for allocation or -1
@@ -1485,8 +1487,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  *	allocator with @gfp_mask flags.  Map them into contiguous
  *	kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1496,8 +1499,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
-	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
-				  node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
+				  VMALLOC_END, node, gfp_mask, caller);
 
 	if (!area)
 		return NULL;
@@ -1516,7 +1519,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-	return __vmalloc_node(size, gfp_mask, prot, -1,
+	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
@@ -1532,7 +1535,7 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
@@ -1549,7 +1552,8 @@ void *vmalloc_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+	ret = __vmalloc_node(size, SHMLBA,
+			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 			     PAGE_KERNEL, -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);
@@ -1572,7 +1576,7 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 					node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
@@ -1595,7 +1599,7 @@ EXPORT_SYMBOL(vmalloc_node);
 
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
 			      -1, __builtin_return_address(0));
 }
 
@@ -1616,7 +1620,7 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
+	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
 			      -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_32);
@@ -1633,7 +1637,7 @@ void *vmalloc_32_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
+	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
 			     -1, __builtin_return_address(0));
 	if (ret) {
 		area = find_vm_area(ret);