View source

[S390] Improve address space mode selection.

Introduce user_mode to replace the two variables switch_amode and
s390_noexec. There are three valid combinations of the old values:
  1) switch_amode == 0 && s390_noexec == 0
  2) switch_amode == 1 && s390_noexec == 0
  3) switch_amode == 1 && s390_noexec == 1
They get replaced by
  1) user_mode == HOME_SPACE_MODE
  2) user_mode == PRIMARY_SPACE_MODE
  3) user_mode == SECONDARY_SPACE_MODE
The new kernel parameter user_mode=[primary,secondary,home] lets
you choose the address space mode the user space processes should
use. In addition the CONFIG_S390_SWITCH_AMODE config option
is removed.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Martin Schwidefsky, 15 years ago
parent
commit
b11b533427

+ 0 - 15
arch/s390/Kconfig

@@ -220,23 +220,8 @@ config AUDIT_ARCH
 	bool
 	bool
 	default y
 	default y
 
 
-config S390_SWITCH_AMODE
-	bool "Switch kernel/user addressing modes"
-	help
-	  This option allows to switch the addressing modes of kernel and user
-	  space. The kernel parameter switch_amode=on will enable this feature,
-	  default is disabled. Enabling this (via kernel parameter) on machines
-	  earlier than IBM System z9-109 EC/BC will reduce system performance.
-
-	  Note that this option will also be selected by selecting the execute
-	  protection option below. Enabling the execute protection via the
-	  noexec kernel parameter will also switch the addressing modes,
-	  independent of the switch_amode kernel parameter.
-
-
 config S390_EXEC_PROTECT
 config S390_EXEC_PROTECT
 	bool "Data execute protection"
 	bool "Data execute protection"
-	select S390_SWITCH_AMODE
 	help
 	help
 	  This option allows to enable a buffer overflow protection for user
 	  This option allows to enable a buffer overflow protection for user
 	  space programs and it also selects the addressing mode option above.
 	  space programs and it also selects the addressing mode option above.

+ 0 - 1
arch/s390/defconfig

@@ -185,7 +185,6 @@ CONFIG_HOTPLUG_CPU=y
 CONFIG_COMPAT=y
 CONFIG_COMPAT=y
 CONFIG_SYSVIPC_COMPAT=y
 CONFIG_SYSVIPC_COMPAT=y
 CONFIG_AUDIT_ARCH=y
 CONFIG_AUDIT_ARCH=y
-CONFIG_S390_SWITCH_AMODE=y
 CONFIG_S390_EXEC_PROTECT=y
 CONFIG_S390_EXEC_PROTECT=y
 
 
 #
 #

+ 2 - 2
arch/s390/include/asm/mmu_context.h

@@ -36,7 +36,7 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.has_pgste = 1;
 		mm->context.has_pgste = 1;
 		mm->context.alloc_pgste = 1;
 		mm->context.alloc_pgste = 1;
 	} else {
 	} else {
-		mm->context.noexec = s390_noexec;
+		mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
 		mm->context.has_pgste = 0;
 		mm->context.has_pgste = 0;
 		mm->context.alloc_pgste = 0;
 		mm->context.alloc_pgste = 0;
 	}
 	}
@@ -58,7 +58,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	pgd_t *pgd = mm->pgd;
 	pgd_t *pgd = mm->pgd;
 
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (switch_amode) {
+	if (user_mode != HOME_SPACE_MODE) {
 		/* Load primary space page table origin. */
 		/* Load primary space page table origin. */
 		pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
 		pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
 		S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
 		S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);

+ 2 - 1
arch/s390/include/asm/pgalloc.h

@@ -143,7 +143,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	spin_lock_init(&mm->context.list_lock);
 	spin_lock_init(&mm->context.list_lock);
 	INIT_LIST_HEAD(&mm->context.crst_list);
 	INIT_LIST_HEAD(&mm->context.crst_list);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
-	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
+	return (pgd_t *)
+		crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
 }
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
 

+ 6 - 11
arch/s390/include/asm/setup.h

@@ -49,17 +49,12 @@ extern unsigned long memory_end;
 
 
 void detect_memory_layout(struct mem_chunk chunk[]);
 void detect_memory_layout(struct mem_chunk chunk[]);
 
 
-#ifdef CONFIG_S390_SWITCH_AMODE
-extern unsigned int switch_amode;
-#else
-#define switch_amode	(0)
-#endif
-
-#ifdef CONFIG_S390_EXEC_PROTECT
-extern unsigned int s390_noexec;
-#else
-#define s390_noexec	(0)
-#endif
+#define PRIMARY_SPACE_MODE	0
+#define ACCESS_REGISTER_MODE	1
+#define SECONDARY_SPACE_MODE	2
+#define HOME_SPACE_MODE		3
+
+extern unsigned int user_mode;
 
 
 /*
 /*
  * Machine features detected in head.S
  * Machine features detected in head.S

+ 20 - 16
arch/s390/kernel/setup.c

@@ -305,9 +305,8 @@ static int __init early_parse_mem(char *p)
 }
 }
 early_param("mem", early_parse_mem);
 early_param("mem", early_parse_mem);
 
 
-#ifdef CONFIG_S390_SWITCH_AMODE
-unsigned int switch_amode = 0;
-EXPORT_SYMBOL_GPL(switch_amode);
+unsigned int user_mode = HOME_SPACE_MODE;
+EXPORT_SYMBOL_GPL(user_mode);
 
 
 static int set_amode_and_uaccess(unsigned long user_amode,
 static int set_amode_and_uaccess(unsigned long user_amode,
 				 unsigned long user32_amode)
 				 unsigned long user32_amode)
@@ -340,23 +339,29 @@ static int set_amode_and_uaccess(unsigned long user_amode,
  */
  */
 static int __init early_parse_switch_amode(char *p)
 static int __init early_parse_switch_amode(char *p)
 {
 {
-	switch_amode = 1;
+	if (user_mode != SECONDARY_SPACE_MODE)
+		user_mode = PRIMARY_SPACE_MODE;
 	return 0;
 	return 0;
 }
 }
 early_param("switch_amode", early_parse_switch_amode);
 early_param("switch_amode", early_parse_switch_amode);
 
 
-#else /* CONFIG_S390_SWITCH_AMODE */
-static inline int set_amode_and_uaccess(unsigned long user_amode,
-					unsigned long user32_amode)
+static int __init early_parse_user_mode(char *p)
 {
 {
+	if (p && strcmp(p, "primary") == 0)
+		user_mode = PRIMARY_SPACE_MODE;
+#ifdef CONFIG_S390_EXEC_PROTECT
+	else if (p && strcmp(p, "secondary") == 0)
+		user_mode = SECONDARY_SPACE_MODE;
+#endif
+	else if (!p || strcmp(p, "home") == 0)
+		user_mode = HOME_SPACE_MODE;
+	else
+		return 1;
 	return 0;
 	return 0;
 }
 }
-#endif /* CONFIG_S390_SWITCH_AMODE */
+early_param("user_mode", early_parse_user_mode);
 
 
 #ifdef CONFIG_S390_EXEC_PROTECT
 #ifdef CONFIG_S390_EXEC_PROTECT
-unsigned int s390_noexec = 0;
-EXPORT_SYMBOL_GPL(s390_noexec);
-
 /*
 /*
  * Enable execute protection?
  * Enable execute protection?
  */
  */
@@ -364,8 +369,7 @@ static int __init early_parse_noexec(char *p)
 {
 {
 	if (!strncmp(p, "off", 3))
 	if (!strncmp(p, "off", 3))
 		return 0;
 		return 0;
-	switch_amode = 1;
-	s390_noexec = 1;
+	user_mode = SECONDARY_SPACE_MODE;
 	return 0;
 	return 0;
 }
 }
 early_param("noexec", early_parse_noexec);
 early_param("noexec", early_parse_noexec);
@@ -373,7 +377,7 @@ early_param("noexec", early_parse_noexec);
 
 
 static void setup_addressing_mode(void)
 static void setup_addressing_mode(void)
 {
 {
-	if (s390_noexec) {
+	if (user_mode == SECONDARY_SPACE_MODE) {
 		if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
 		if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
 					  PSW32_ASC_SECONDARY))
 					  PSW32_ASC_SECONDARY))
 			pr_info("Execute protection active, "
 			pr_info("Execute protection active, "
@@ -381,7 +385,7 @@ static void setup_addressing_mode(void)
 		else
 		else
 			pr_info("Execute protection active, "
 			pr_info("Execute protection active, "
 				"mvcos not available\n");
 				"mvcos not available\n");
-	} else if (switch_amode) {
+	} else if (user_mode == PRIMARY_SPACE_MODE) {
 		if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
 		if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
 			pr_info("Address spaces switched, "
 			pr_info("Address spaces switched, "
 				"mvcos available\n");
 				"mvcos available\n");
@@ -411,7 +415,7 @@ setup_lowcore(void)
 	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 	lc->restart_psw.addr =
 	lc->restart_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
 		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
-	if (switch_amode)
+	if (user_mode != HOME_SPACE_MODE)
 		lc->restart_psw.mask |= PSW_ASC_HOME;
 		lc->restart_psw.mask |= PSW_ASC_HOME;
 	lc->external_new_psw.mask = psw_kernel_bits;
 	lc->external_new_psw.mask = psw_kernel_bits;
 	lc->external_new_psw.addr =
 	lc->external_new_psw.addr =

+ 5 - 4
arch/s390/kernel/vdso.c

@@ -86,7 +86,8 @@ static void vdso_init_data(struct vdso_data *vd)
 	unsigned int facility_list;
 	unsigned int facility_list;
 
 
 	facility_list = stfl();
 	facility_list = stfl();
-	vd->ectg_available = switch_amode && (facility_list & 1);
+	vd->ectg_available =
+		user_mode != HOME_SPACE_MODE && (facility_list & 1);
 }
 }
 
 
 #ifdef CONFIG_64BIT
 #ifdef CONFIG_64BIT
@@ -114,7 +115,7 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
 
 
 	lowcore->vdso_per_cpu_data = __LC_PASTE;
 	lowcore->vdso_per_cpu_data = __LC_PASTE;
 
 
-	if (!switch_amode || !vdso_enabled)
+	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return 0;
 		return 0;
 
 
 	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
 	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -160,7 +161,7 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
 	unsigned long segment_table, page_table, page_frame;
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
 	u32 *psal, *aste;
 
 
-	if (!switch_amode || !vdso_enabled)
+	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
 		return;
 		return;
 
 
 	psal = (u32 *)(addr_t) lowcore->paste[4];
 	psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -184,7 +185,7 @@ static void __vdso_init_cr5(void *dummy)
 
 
 static void vdso_init_cr5(void)
 static void vdso_init_cr5(void)
 {
 {
-	if (switch_amode && vdso_enabled)
+	if (user_mode != HOME_SPACE_MODE && vdso_enabled)
 		on_each_cpu(__vdso_init_cr5, NULL, 1);
 		on_each_cpu(__vdso_init_cr5, NULL, 1);
 }
 }
 #endif /* CONFIG_64BIT */
 #endif /* CONFIG_64BIT */

+ 0 - 1
arch/s390/kvm/Kconfig

@@ -20,7 +20,6 @@ config KVM
 	depends on HAVE_KVM && EXPERIMENTAL
 	depends on HAVE_KVM && EXPERIMENTAL
 	select PREEMPT_NOTIFIERS
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select ANON_INODES
-	select S390_SWITCH_AMODE
 	---help---
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
 	  virtualization capability on the mainframe. This should work

+ 0 - 4
arch/s390/lib/uaccess_mvcos.c

@@ -162,7 +162,6 @@ static size_t clear_user_mvcos(size_t size, void __user *to)
 	return size;
 	return size;
 }
 }
 
 
-#ifdef CONFIG_S390_SWITCH_AMODE
 static size_t strnlen_user_mvcos(size_t count, const char __user *src)
 static size_t strnlen_user_mvcos(size_t count, const char __user *src)
 {
 {
 	char buf[256];
 	char buf[256];
@@ -200,7 +199,6 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
 	} while ((len_str == len) && (done < count));
 	} while ((len_str == len) && (done < count));
 	return done;
 	return done;
 }
 }
-#endif /* CONFIG_S390_SWITCH_AMODE */
 
 
 struct uaccess_ops uaccess_mvcos = {
 struct uaccess_ops uaccess_mvcos = {
 	.copy_from_user = copy_from_user_mvcos_check,
 	.copy_from_user = copy_from_user_mvcos_check,
@@ -215,7 +213,6 @@ struct uaccess_ops uaccess_mvcos = {
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
 };
 };
 
 
-#ifdef CONFIG_S390_SWITCH_AMODE
 struct uaccess_ops uaccess_mvcos_switch = {
 struct uaccess_ops uaccess_mvcos_switch = {
 	.copy_from_user = copy_from_user_mvcos,
 	.copy_from_user = copy_from_user_mvcos,
 	.copy_from_user_small = copy_from_user_mvcos,
 	.copy_from_user_small = copy_from_user_mvcos,
@@ -228,4 +225,3 @@ struct uaccess_ops uaccess_mvcos_switch = {
 	.futex_atomic_op = futex_atomic_op_pt,
 	.futex_atomic_op = futex_atomic_op_pt,
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
 	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
 };
 };
-#endif

+ 2 - 2
arch/s390/mm/fault.c

@@ -112,7 +112,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
 	if (trans_exc_code == 2)
 	if (trans_exc_code == 2)
 		/* Access via secondary space, set_fs setting decides */
 		/* Access via secondary space, set_fs setting decides */
 		return current->thread.mm_segment.ar4;
 		return current->thread.mm_segment.ar4;
-	if (!switch_amode)
+	if (user_mode == HOME_SPACE_MODE)
 		/* User space if the access has been done via home space. */
 		/* User space if the access has been done via home space. */
 		return trans_exc_code == 3;
 		return trans_exc_code == 3;
 	/*
 	/*
@@ -168,7 +168,7 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
 	 * terminate things with extreme prejudice.
 	 * terminate things with extreme prejudice.
 	 */
 	 */
 	address = trans_exc_code & __FAIL_ADDR_MASK;
 	address = trans_exc_code & __FAIL_ADDR_MASK;
-	if (user_space_fault(trans_exc_code) == 0)
+	if (!user_space_fault(trans_exc_code))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
 		       " at virtual kernel address %p\n", (void *)address);
 		       " at virtual kernel address %p\n", (void *)address);
 	else
 	else

+ 1 - 1
arch/s390/mm/pgtable.c

@@ -269,7 +269,7 @@ int s390_enable_sie(void)
 	struct mm_struct *mm, *old_mm;
 	struct mm_struct *mm, *old_mm;
 
 
 	/* Do we have switched amode? If no, we cannot do sie */
 	/* Do we have switched amode? If no, we cannot do sie */
-	if (!switch_amode)
+	if (user_mode == HOME_SPACE_MODE)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	/* Do we have pgstes? if yes, we are done */
 	/* Do we have pgstes? if yes, we are done */