@@ -14,13 +14,9 @@
  *
  *
  */
-#include <linux/threads.h>
 #include <linux/mm.h>
-
-#define pgd_quicklist (current_cpu_data.pgd_quick)
-#define pmd_quicklist (current_cpu_data.pmd_quick)
-#define pte_quicklist (current_cpu_data.pte_quick)
-#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+#include <linux/quicklist.h>
+#include <asm/page.h>
 
 static inline void pgd_init(unsigned long page)
 {
@@ -45,84 +41,37 @@ static inline pgd_t *get_pgd_slow(void)
 	return ret;
 }
 
-static inline pgd_t *get_pgd_fast(void)
-{
-	unsigned long *ret;
-
-	if ((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-	} else
-		ret = (unsigned long *)get_pgd_slow();
-
-	if (ret) {
-		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-	}
-	return (pgd_t *)ret;
-}
-
-static inline void free_pgd_fast(pgd_t *pgd)
-{
-	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-	pgd_quicklist = (unsigned long *) pgd;
-	pgtable_cache_size++;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	kfree((void *)pgd);
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
-extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
-extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
-
-static inline pte_t *get_pte_fast(void)
+static inline void pgd_free(pgd_t *pgd)
 {
-	unsigned long *ret;
-
-	if((ret = (unsigned long *)pte_quicklist) != NULL) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = ret[1];
-		pgtable_cache_size--;
-	}
-	return (pte_t *)ret;
+	quicklist_free(0, NULL, pgd);
 }
 
-static inline void free_pte_fast(pte_t *pte)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long address)
 {
-	*(unsigned long *)pte = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pte;
-	pgtable_cache_size++;
+	void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
+	return pg ? virt_to_page(pg) : NULL;
 }
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	quicklist_free(0, NULL, pte);
 }
 
 static inline void pte_free(struct page *pte)
 {
-	__free_page(pte);
+	quicklist_free_page(0, NULL, pte);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					   unsigned long address)
 {
-	pte_t *pte;
-
-	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT|__GFP_ZERO);
-
-	return pte;
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	struct page *pte;
-
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-
-	return pte;
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 #define __pte_free_tlb(tlb,pte)	tlb_remove_page((tlb),(pte))
@@ -142,31 +91,23 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 
 #elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
 
-static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pmd_t *pmd;
-	pmd = (pmd_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-	return pmd;
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
-static __inline__ void pmd_free(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
 {
-	free_page((unsigned long) pmd);
+	quicklist_free(0, NULL, pmd);
 }
 
-#define pgd_populate(mm, pgd, pmd)	pgd_set(pgd, pmd)
+#define pgd_populate(mm, pgd, pmd)		pgd_set(pgd, pmd)
 #define __pmd_free_tlb(tlb,pmd)		pmd_free(pmd)
 
 #else
 #error "No defined page table size"
 #endif
 
-#define check_pgt_cache()		do { } while (0)
-#define pgd_free(pgd)		free_pgd_slow(pgd)
-#define pgd_alloc(mm)		get_pgd_fast()
-
-extern int do_check_pgt_cache(int, int);
-
 #define pmd_populate_kernel(mm, pmd, pte) \
 	set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte)))
 
@@ -176,4 +117,9 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 	set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) page_address (pte)));
 }
 
+static inline void check_pgt_cache(void)
+{
+	quicklist_trim(0, NULL, 25, 16);
+}
+
 #endif /* __ASM_SH64_PGALLOC_H */