@@ -232,42 +232,92 @@ static inline void pgd_list_del(pgd_t *pgd)
 		set_page_private(next, (unsigned long)pprev);
 }
 
+#if (PTRS_PER_PMD == 1)
+/* Non-PAE pgd constructor */
 void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags;
 
-	if (PTRS_PER_PMD == 1) {
-		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-		spin_lock_irqsave(&pgd_lock, flags);
-	}
+	/* !PAE, no pagetable sharing */
+	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+
+	spin_lock_irqsave(&pgd_lock, flags);
 
+	/* must happen under lock */
 	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
 			swapper_pg_dir + USER_PTRS_PER_PGD,
 			KERNEL_PGD_PTRS);
-
-	if (PTRS_PER_PMD > 1)
-		return;
-
-	/* must happen under lock */
 	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-			__pa(swapper_pg_dir) >> PAGE_SHIFT,
-			USER_PTRS_PER_PGD, PTRS_PER_PGD - USER_PTRS_PER_PGD);
-
+				__pa(swapper_pg_dir) >> PAGE_SHIFT,
+				USER_PTRS_PER_PGD,
+				KERNEL_PGD_PTRS);
 	pgd_list_add(pgd);
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
+#else	/* PTRS_PER_PMD > 1 */
+/* PAE pgd constructor */
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+{
+	/* PAE, kernel PMD may be shared */
+
+	if (SHARED_KERNEL_PMD) {
+		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+				swapper_pg_dir + USER_PTRS_PER_PGD,
+				KERNEL_PGD_PTRS);
+	} else {
+		unsigned long flags;
+
+		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
+		spin_lock_irqsave(&pgd_lock, flags);
+		pgd_list_add(pgd);
+		spin_unlock_irqrestore(&pgd_lock, flags);
+	}
+}
+#endif	/* PTRS_PER_PMD */
 
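+/*
+ * pgds are kept on pgd_list only when each one carries a private copy
+ * of the kernel mappings (non-PAE, or PAE with !SHARED_KERNEL_PMD), so
+ * that updates to the kernel part of the address space can be
+ * propagated to every pagetable; a shared kernel pmd picks those
+ * updates up automatically.
+ */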
-/* never called when PTRS_PER_PMD > 1 */
 void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
 	unsigned long flags; /* can be called from interrupt context */
 
+	BUG_ON(SHARED_KERNEL_PMD);
+
 	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
 	spin_lock_irqsave(&pgd_lock, flags);
 	pgd_list_del(pgd);
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
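+/* Number of pgd entries that must point at a pmd page private to this
+   pgd: just the user range when the kernel pmd is shared, otherwise
+   the whole pgd. */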
+#define UNSHARED_PTRS_PER_PGD				\
+	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
+
+/* If we allocate a pmd for part of the kernel address space, then
+   make sure it is initialized with the appropriate kernel mappings.
+   Otherwise use a cached zeroed pmd. */
+static pmd_t *pmd_cache_alloc(int idx)
+{
+	pmd_t *pmd;
+
+	if (idx >= USER_PTRS_PER_PGD) {
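+		/* pmd covers kernel address space: seed it from
+		   swapper_pg_dir instead of handing back zeroes */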
+		pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
+
+		if (pmd)
+			memcpy(pmd,
+			       (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
+			       sizeof(pmd_t) * PTRS_PER_PMD);
+	} else
+		pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+
+	return pmd;
+}
+
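+/* Must mirror pmd_cache_alloc(): kernel pmds come from the page
+   allocator, user pmds from pmd_cache. */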
+static void pmd_cache_free(pmd_t *pmd, int idx)
+{
+	if (idx >= USER_PTRS_PER_PGD)
+		free_page((unsigned long)pmd);
+	else
+		kmem_cache_free(pmd_cache, pmd);
+}
+
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	int i;
@@ -276,10 +326,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (PTRS_PER_PMD == 1 || !pgd)
 		return pgd;
 
-	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+	for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
+		pmd_t *pmd = pmd_cache_alloc(i);
+
 		if (!pmd)
 			goto out_oom;
+
 		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
 	}
@@ -290,7 +342,7 @@ out_oom:
 		pgd_t pgdent = pgd[i];
 		void* pmd = (void *)__va(pgd_val(pgdent)-1);
 		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-		kmem_cache_free(pmd_cache, pmd);
+		pmd_cache_free(pmd, i);
 	}
 	kmem_cache_free(pgd_cache, pgd);
 	return NULL;
@@ -302,11 +354,11 @@ void pgd_free(pgd_t *pgd)
 
 	/* in the PAE case user pgd entries are overwritten before usage */
 	if (PTRS_PER_PMD > 1)
-		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+		for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
 			pgd_t pgdent = pgd[i];
 			void* pmd = (void *)__va(pgd_val(pgdent)-1);
 			paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-			kmem_cache_free(pmd_cache, pmd);
+			pmd_cache_free(pmd, i);
 		}
 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
 	kmem_cache_free(pgd_cache, pgd);
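
Two notes on the diff above.

The "1 +" in set_pgd(&pgd[i], __pgd(1 + __pa(pmd))) sets the low bit of
the entry, i.e. _PAGE_PRESENT; the matching __va(pgd_val(pgdent)-1) in
the teardown paths strips that bit again to recover the pmd's virtual
address.

The behaviour pivots on SHARED_KERNEL_PMD, which is defined outside
this diff. A minimal sketch of how it is expected to resolve for each
pagetable layout; the header placement and the paravirt hook shown here
are assumptions for illustration, not part of this diff:

	/* 2-level (non-PAE): there is no kernel pmd to share, so every
	 * pgd is tracked and synced by hand; reporting "not shared"
	 * also keeps pgd_dtor()'s BUG_ON quiet for !PAE. */
	#define SHARED_KERNEL_PMD	0

	/* 3-level (PAE): a native kernel shares one set of kernel pmd
	 * pages across all pagetables; a paravirt backend may need a
	 * private copy per pgd and can report that at runtime. */
	#ifdef CONFIG_PARAVIRT
	#define SHARED_KERNEL_PMD	(paravirt_ops.shared_kernel_pmd)
	#else
	#define SHARED_KERNEL_PMD	1
	#endif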