@@ -915,15 +915,15 @@ LIST_HEAD(pgd_list);
 
 void vmalloc_sync_all(void)
 {
-#ifdef CONFIG_X86_32
-	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
+#ifdef CONFIG_X86_32
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
+	for (address = VMALLOC_START & PMD_MASK;
+	     address >= TASK_SIZE && address < FIXADDR_TOP;
+	     address += PMD_SIZE) {
 		unsigned long flags;
 		struct page *page;
 
@@ -936,10 +936,8 @@ void vmalloc_sync_all(void)
 		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #else /* CONFIG_X86_64 */
-	unsigned long start = VMALLOC_START & PGDIR_MASK;
-	unsigned long address;
-
-	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
+	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+	     address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
 		unsigned long flags;
 		struct page *page;