@@ -219,50 +219,39 @@ static inline void pgd_list_del(pgd_t *pgd)
 	list_del(&page->lru);
 }
 
+#define UNSHARED_PTRS_PER_PGD				\
+	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
 
-
-#if (PTRS_PER_PMD == 1)
-/* Non-PAE pgd constructor */
-static void pgd_ctor(void *pgd)
+static void pgd_ctor(void *p)
 {
+	pgd_t *pgd = p;
 	unsigned long flags;
 
-	/* !PAE, no pagetable sharing */
+	/* Clear usermode parts of PGD */
 	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 
 	spin_lock_irqsave(&pgd_lock, flags);
 
-	/* must happen under lock */
-	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
-			swapper_pg_dir + USER_PTRS_PER_PGD,
-			KERNEL_PGD_PTRS);
-	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-				__pa(swapper_pg_dir) >> PAGE_SHIFT,
-				USER_PTRS_PER_PGD,
-				KERNEL_PGD_PTRS);
-	pgd_list_add(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-}
-#else /* PTRS_PER_PMD > 1 */
-/* PAE pgd constructor */
-static void pgd_ctor(void *pgd)
-{
-	/* PAE, kernel PMD may be shared */
-
-	if (SHARED_KERNEL_PMD) {
-		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+	/* If the pgd points to a shared pagetable level (either the
+	   ptes in non-PAE, or shared PMD in PAE), then just copy the
+	   references from swapper_pg_dir. */
+	if (PAGETABLE_LEVELS == 2 ||
+	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
+		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 				swapper_pg_dir + USER_PTRS_PER_PGD,
 				KERNEL_PGD_PTRS);
-	} else {
-		unsigned long flags;
+		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
+					__pa(swapper_pg_dir) >> PAGE_SHIFT,
+					USER_PTRS_PER_PGD,
+					KERNEL_PGD_PTRS);
+	}
 
-		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-		spin_lock_irqsave(&pgd_lock, flags);
+	/* list required to sync kernel mapping updates */
+	if (!SHARED_KERNEL_PMD)
 		pgd_list_add(pgd);
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
+
+	spin_unlock_irqrestore(&pgd_lock, flags);
 }
-#endif /* PTRS_PER_PMD */
 
 static void pgd_dtor(void *pgd)
 {
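
[Note, not part of the patch: clone_pgd_range() is what makes the sharing above cheap. In the x86 headers of this era it is essentially a memcpy() of page-directory entries, roughly:

	static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
	{
		memcpy(dst, src, count * sizeof(pgd_t));
	}

so when the kernel half of the pagetable is shared (non-PAE pte pages, or a shared PAE kernel pmd), a new pgd copies KERNEL_PGD_PTRS references out of swapper_pg_dir once at construction time. Only the !SHARED_KERNEL_PMD case still has to sit on pgd_list so that later kernel mapping updates can be propagated into every pgd.]
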
@@ -276,9 +265,6 @@ static void pgd_dtor(void *pgd)
 	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
-#define UNSHARED_PTRS_PER_PGD				\
-	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
-
 #ifdef CONFIG_X86_PAE
 /*
  * Mop up any pmd pages which may still be attached to the pgd.
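
[Note, not part of the patch: UNSHARED_PTRS_PER_PGD, which the patch moves up next to pgd_ctor(), counts the pgd entries that are private to each process. A minimal standalone sketch of the arithmetic, assuming the classic i386 3G/1G split for the example values (the real values come from the kernel headers, not from this snippet):

	#include <stdio.h>

	/* Mirrors (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD) */
	#define UNSHARED(shared_kernel_pmd, user_ptrs, total_ptrs) \
		((shared_kernel_pmd) ? (user_ptrs) : (total_ptrs))

	int main(void)
	{
		/* Non-PAE: 2-level paging, 1024 pgd entries, and
		 * SHARED_KERNEL_PMD is 0 (no pmd level to share),
		 * so all 1024 entries are per-process. */
		printf("non-PAE: %d unshared of %d\n",
		       UNSHARED(0, 768, 1024), 1024);

		/* PAE: 3-level paging with only 4 pgd entries; the
		 * kernel pmd is normally shared (SHARED_KERNEL_PMD
		 * is 1), so just the 3 user entries are private. */
		printf("PAE:     %d unshared of %d\n",
		       UNSHARED(1, 3, 4), 4);
		return 0;
	}

This prints "non-PAE: 1024 unshared of 1024" and "PAE: 3 unshared of 4", matching the two branches of the macro.]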