@@ -1,6 +1,12 @@
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H
 
+#include <linux/quicklist.h>
+#include <asm/page.h>
+
+#define QUICK_PGD 0	/* We preserve special mappings over free */
+#define QUICK_PT 1	/* Other page table pages that are zero on free */
+
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 				       pte_t *pte)
 {
@@ -13,48 +19,49 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
 }
 
+static inline void pgd_ctor(void *x)
+{
+	pgd_t *pgd = x;
+
+	memcpy(pgd + USER_PTRS_PER_PGD,
+	       swapper_pg_dir + USER_PTRS_PER_PGD,
+	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
 /*
  * Allocate and free page tables.
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
-
-	if (pgd) {
-		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-		memcpy(pgd + USER_PTRS_PER_PGD,
-		       swapper_pg_dir + USER_PTRS_PER_PGD,
-		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-	}
-
-	return pgd;
+	return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
 }
 
 static inline void pgd_free(pgd_t *pgd)
 {
-	free_page((unsigned long)pgd);
+	quicklist_free(QUICK_PGD, NULL, pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
 					 unsigned long address)
 {
-	return alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	void *pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
+	return pg ? virt_to_page(pg) : NULL;
 }
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	quicklist_free(QUICK_PT, NULL, pte);
 }
 
 static inline void pte_free(struct page *pte)
 {
-	__free_page(pte);
+	quicklist_free_page(QUICK_PT, NULL, pte);
 }
 
 #define __pte_free_tlb(tlb,pte)	tlb_remove_page((tlb),(pte))
@@ -66,6 +73,11 @@ static inline void pte_free(struct page *pte)
 
 #define pmd_free(x)			do { } while (0)
 #define __pmd_free_tlb(tlb,x)		do { } while (0)
-#define check_pgt_cache()		do { } while (0)
+
+static inline void check_pgt_cache(void)
+{
+	quicklist_trim(QUICK_PGD, NULL, 25, 16);
+	quicklist_trim(QUICK_PT, NULL, 25, 16);
+}
 
 #endif /* __ASM_SH_PGALLOC_H */
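
For context, the sketch below (not part of the patch) walks through the allocation lifecycle the converted helpers assume: pte_alloc_one() pops a pre-zeroed page off the per-CPU QUICK_PT quicklist when one is cached, pte_free() pushes the page back onto that list instead of returning it to the page allocator, and check_pgt_cache() bounds the cache via quicklist_trim(). demo_pte_cycle() is a hypothetical caller used purely for illustration.

/* Illustrative only -- not part of the patch. */
#include <linux/mm.h>
#include <asm/pgalloc.h>

/* demo_pte_cycle() is a hypothetical caller used to show the lifecycle
 * the quicklist-backed helpers above are built around. */
static void demo_pte_cycle(struct mm_struct *mm, unsigned long address)
{
	/* Falls back to the page allocator (zeroing the page) when the
	 * per-CPU QUICK_PT list is empty. */
	struct page *pte = pte_alloc_one(mm, address);

	if (!pte)
		return;

	/* Returns the page, still zeroed per the QUICK_PT rule above, to
	 * the per-CPU quicklist rather than to the page allocator, so a
	 * later pte_alloc_one() is a cheap list pop with no memset. */
	pte_free(pte);

	/* Trims both per-CPU lists back down: with the values used in the
	 * patch, pages beyond a cache of 25 are freed, at most 16 per call. */
	check_pgt_cache();
}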