@@ -27,12 +27,19 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-static pud_t *vmem_pud_alloc(void)
+static void __ref *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
 	return pud;
 }
 
-static pmd_t *vmem_pmd_alloc(void)
+static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		if (pte_none(*pt_dir)) {
 			unsigned long new_page;
 
-			new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
+			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
 			*pt_dir = pte;
 		}
 	}
+	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
 out:
 	flush_tlb_kernel_range(start_addr, end_addr);
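
An aside, outside the patch itself: the new vmem_alloc_pages() helper takes an allocation order (a power-of-two number of pages) instead of a byte size, using __get_free_pages() once the slab allocator is available and falling back to alloc_bootmem_pages() during early boot. The minimal sketch below only illustrates the order arithmetic, assuming the 4 KiB PAGE_SIZE used on s390; order_to_bytes() is a hypothetical helper for this example, not kernel code.

	/*
	 * Illustrative sketch only, not part of the patch: shows why
	 * vmem_alloc_pages(2) replaces the PAGE_SIZE * 4 allocations for
	 * the pud/pmd tables, and vmem_alloc_pages(0) the single-page
	 * vmemmap allocations.
	 */
	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages, as on s390 */

	static unsigned long order_to_bytes(unsigned int order)
	{
		/* same formula the patch passes to alloc_bootmem_pages() */
		return (1UL << order) * PAGE_SIZE;
	}

	int main(void)
	{
		printf("order 0 -> %lu bytes\n", order_to_bytes(0));	/* 4096 */
		printf("order 2 -> %lu bytes\n", order_to_bytes(2));	/* 16384 */
		return 0;
	}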