
s390/mm: downgrade page table after fork of a 31 bit process

The downgrade of the 4 level page table created by init_new_context is
currently done only in start_thread31. If a 31 bit process forks, the
new mm uses a 4 level page table, including the task size of 2<<42
that goes along with it. This is incorrect, as a 31 bit process can
now map memory beyond 2GB. Define arch_dup_mmap to do the downgrade
after fork.

Cc: stable@vger.kernel.org
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit 0f6f281b73
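
For context: on fork, the generic dup_mmap() in kernel/fork.c copies the
parent's address space into the child's new mm and then calls the
arch_dup_mmap() hook that this patch implements for s390. A minimal
sketch of that call site (simplified; locking and error handling
omitted, not the verbatim kernel source):

	/* Sketch of the generic fork path in kernel/fork.c (simplified). */
	static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
	{
		/* ... copy every VMA and its page tables from oldmm into mm ... */

		/* a new mm has just been created */
		arch_dup_mmap(oldmm, mm);	/* s390 now downgrades a 31 bit child here */
		return 0;
	}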

arch/s390/include/asm/mmu_context.h | +13 -1

@@ -11,7 +11,6 @@
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/ctl_reg.h>
-#include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
@@ -91,4 +90,17 @@ static inline void activate_mm(struct mm_struct *prev,
         switch_mm(prev, next, current);
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+#ifdef CONFIG_64BIT
+	if (oldmm->context.asce_limit < mm->context.asce_limit)
+		crst_table_downgrade(mm, oldmm->context.asce_limit);
+#endif
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
 #endif /* __S390_MMU_CONTEXT_H */
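
The dropped <asm-generic/mm_hooks.h> include exists for architectures
that need no mm hooks; it provides empty inline stubs roughly like the
sketch below (from memory, not verbatim). Because s390 now needs a real
arch_dup_mmap(), it can no longer pull in the generic header and must
define the no-op arch_exit_mmap() itself to keep the interface complete:

	/* Approximate shape of the generic no-op hooks (sketch). */
	static inline void arch_dup_mmap(struct mm_struct *oldmm,
					 struct mm_struct *mm)
	{
	}

	static inline void arch_exit_mmap(struct mm_struct *mm)
	{
	}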

arch/s390/include/asm/processor.h | +2 -0

@@ -120,7 +120,9 @@ struct stack_frame {
 	regs->psw.mask	= psw_user_bits | PSW_MASK_BA;			\
 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;			\
 	regs->gprs[15]	= new_stackp;					\
+	__tlb_flush_mm(current->mm);					\
 	crst_table_downgrade(current->mm, 1UL << 31);			\
+	update_mm(current->mm, current);				\
 } while (0)
 
 /* Forward declaration, a strange C thing */

arch/s390/mm/mmap.c | +10 -2

@@ -103,9 +103,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 int s390_mmap_check(unsigned long addr, unsigned long len)
 {
+	int rc;
+
 	if (!is_compat_task() &&
-	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
-		return crst_table_upgrade(current->mm, 1UL << 53);
+	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+		rc = crst_table_upgrade(current->mm, 1UL << 53);
+		if (rc)
+			return rc;
+		update_mm(current->mm, current);
+	}
 	return 0;
 }
 
@@ -125,6 +131,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	}
 	return area;
@@ -147,6 +154,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
 			return (unsigned long) rc;
+		update_mm(mm, current);
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
 						      pgoff, flags);
 	}

arch/s390/mm/pgtable.c | +0 -5

@@ -85,7 +85,6 @@ repeat:
 		crst_table_free(mm, table);
 	if (mm->context.asce_limit < limit)
 		goto repeat;
-	update_mm(mm, current);
 	return 0;
 }
 
@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 {
 	pgd_t *pgd;
 
-	if (mm->context.asce_limit <= limit)
-		return;
-	__tlb_flush_mm(mm);
 	while (mm->context.asce_limit > limit) {
 		pgd = mm->pgd;
 		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 		mm->task_size = mm->context.asce_limit;
 		crst_table_free(mm, (unsigned long *) pgd);
 	}
-	update_mm(mm, current);
 }
 #endif
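
After this patch, crst_table_upgrade() and crst_table_downgrade() only
rebuild the page table and mm state; flushing the TLB and reloading the
ASCE into the control registers via update_mm() is now the caller's
job. That split is what makes arch_dup_mmap() cheap and safe: a freshly
forked mm is not active on any CPU, so it needs neither a flush nor an
update_mm() against current. A caller that does operate on the running
task now follows this pattern (sketch, mirroring the start_thread31
change above):

	__tlb_flush_mm(current->mm);			/* flush before the old top-level table is freed */
	crst_table_downgrade(current->mm, 1UL << 31);	/* shrink the address space to 2GB */
	update_mm(current->mm, current);		/* make the control registers point at the new table */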