@@ -447,114 +447,17 @@ out_unlock:
 	return do_split;
 }
 
-static LIST_HEAD(page_pool);
-static unsigned long pool_size, pool_pages, pool_low;
-static unsigned long pool_used, pool_failed;
-
-static void cpa_fill_pool(struct page **ret)
-{
-	gfp_t gfp = GFP_KERNEL;
-	unsigned long flags;
-	struct page *p;
-
-	/*
-	 * Avoid recursion (on debug-pagealloc) and also signal
-	 * our priority to get to these pagetables:
-	 */
-	if (current->flags & PF_MEMALLOC)
-		return;
-	current->flags |= PF_MEMALLOC;
-
-	/*
-	 * Allocate atomically from atomic contexts:
-	 */
-	if (in_atomic() || irqs_disabled() || debug_pagealloc)
-		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-
-	while (pool_pages < pool_size || (ret && !*ret)) {
-		p = alloc_pages(gfp, 0);
-		if (!p) {
-			pool_failed++;
-			break;
-		}
-		/*
-		 * If the call site needs a page right now, provide it:
-		 */
-		if (ret && !*ret) {
-			*ret = p;
-			continue;
-		}
-		spin_lock_irqsave(&pgd_lock, flags);
-		list_add(&p->lru, &page_pool);
-		pool_pages++;
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
-
-	current->flags &= ~PF_MEMALLOC;
-}
-
-#define SHIFT_MB		(20 - PAGE_SHIFT)
-#define ROUND_MB_GB		((1 << 10) - 1)
-#define SHIFT_MB_GB		10
-#define POOL_PAGES_PER_GB	16
-
-void __init cpa_init(void)
-{
-	struct sysinfo si;
-	unsigned long gb;
-
-	si_meminfo(&si);
-	/*
-	 * Calculate the number of pool pages:
-	 *
-	 * Convert totalram (nr of pages) to MiB and round to the next
-	 * GiB. Shift MiB to Gib and multiply the result by
-	 * POOL_PAGES_PER_GB:
-	 */
-	if (debug_pagealloc) {
-		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
-		pool_size = POOL_PAGES_PER_GB * gb;
-	} else {
-		pool_size = 1;
-	}
-	pool_low = pool_size;
-
-	cpa_fill_pool(NULL);
-	printk(KERN_DEBUG
-	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
-	       pool_pages, pool_size);
-}
-
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
 	unsigned long flags, pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
-	struct page *base;
+	struct page *base = alloc_pages(GFP_KERNEL, 0);
+	if (!base)
+		return -ENOMEM;
 
-	/*
-	 * Get a page from the pool. The pool list is protected by the
-	 * pgd_lock, which we have to take anyway for the split
-	 * operation:
-	 */
 	spin_lock_irqsave(&pgd_lock, flags);
-	if (list_empty(&page_pool)) {
-		spin_unlock_irqrestore(&pgd_lock, flags);
-		base = NULL;
-		cpa_fill_pool(&base);
-		if (!base)
-			return -ENOMEM;
-		spin_lock_irqsave(&pgd_lock, flags);
-	} else {
-		base = list_first_entry(&page_pool, struct page, lru);
-		list_del(&base->lru);
-		pool_pages--;
-
-		if (pool_pages < pool_low)
-			pool_low = pool_pages;
-	}
-
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -611,11 +514,8 @@ out_unlock:
 	 * If we dropped out via the lookup_address check under
 	 * pgd_lock then stick the page back into the pool:
 	 */
-	if (base) {
-		list_add(&base->lru, &page_pool);
-		pool_pages++;
-	} else
-		pool_used++;
+	if (base)
+		__free_page(base);
 	spin_unlock_irqrestore(&pgd_lock, flags);
 
 	return 0;
@@ -899,8 +799,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 		cpa_flush_all(cache);
 
 out:
-	cpa_fill_pool(NULL);
-
 	return ret;
 }
 
@@ -1178,53 +1076,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 * but that can deadlock->flush only current cpu:
 	 */
 	__flush_tlb_all();
-
-	/*
-	 * Try to refill the page pool here. We can do this only after
-	 * the tlb flush.
-	 */
-	cpa_fill_pool(NULL);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int dpa_show(struct seq_file *m, void *v)
-{
-	seq_puts(m, "DEBUG_PAGEALLOC\n");
-	seq_printf(m, "pool_size     : %lu\n", pool_size);
-	seq_printf(m, "pool_pages    : %lu\n", pool_pages);
-	seq_printf(m, "pool_low      : %lu\n", pool_low);
-	seq_printf(m, "pool_used     : %lu\n", pool_used);
-	seq_printf(m, "pool_failed   : %lu\n", pool_failed);
-
-	return 0;
-}
-
-static int dpa_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, dpa_show, NULL);
 }
 
-static const struct file_operations dpa_fops = {
-	.open		= dpa_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int __init debug_pagealloc_proc_init(void)
-{
-	struct dentry *de;
-
-	de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
-				 &dpa_fops);
-	if (!de)
-		return -ENOMEM;
-
-	return 0;
-}
-__initcall(debug_pagealloc_proc_init);
-#endif
-
 #ifdef CONFIG_HIBERNATION
 
 bool kernel_page_present(struct page *page)