
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 patches from Martin Schwidefsky:
 "The biggest patch is the rework of the smp code, something I wanted to
  do for some time.  There are some patches for our various dump methods
  and one new thing: z/VM LGR detection.  LGR stands for linux-guest-
  relocation and is the guest migration feature of z/VM.  For debugging
  purposes we keep a log of the systems where a specific guest has lived."

Fix up trivial conflict in arch/s390/kernel/smp.c due to the scheduler
cleanup having removed some code next to removed s390 code.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  [S390] kernel: Pass correct stack for smp_call_ipl_cpu()
  [S390] Ensure that vmcore_info pointer is never accessed directly
  [S390] dasd: prevent validate server for offline devices
  [S390] Remove monolithic build option for zcrypt driver.
  [S390] stack dump: fix indentation in output
  [S390] kernel: Add OS info memory interface
  [S390] Use block_sigmask()
  [S390] kernel: Add z/VM LGR detection
  [S390] irq: external interrupt code passing
  [S390] irq: set __ARCH_IRQ_EXIT_IRQS_DISABLED
  [S390] zfcpdump: Implement async sdias event processing
  [S390] Use copy_to_absolute_zero() instead of "stura/sturg"
  [S390] rework idle code
  [S390] rework smp code
  [S390] rename lowcore field
  [S390] Fix gcc 4.6.0 compile warning
Linus Torvalds 13 years ago
parent
commit
db14179679
64 changed files with 1597 additions and 1435 deletions
  1. arch/s390/include/asm/cputime.h (+1, -8)
  2. arch/s390/include/asm/debug.h (+1, -0)
  3. arch/s390/include/asm/hardirq.h (+1, -0)
  4. arch/s390/include/asm/ipl.h (+1, -0)
  5. arch/s390/include/asm/irq.h (+6, -1)
  6. arch/s390/include/asm/lowcore.h (+65, -54)
  7. arch/s390/include/asm/os_info.h (+50, -0)
  8. arch/s390/include/asm/sigp.h (+0, -132)
  9. arch/s390/include/asm/smp.h (+22, -41)
  10. arch/s390/include/asm/system.h (+34, -0)
  11. arch/s390/include/asm/timer.h (+2, -2)
  12. arch/s390/include/asm/vdso.h (+2, -2)
  13. arch/s390/kernel/Makefile (+1, -3)
  14. arch/s390/kernel/asm-offsets.c (+16, -11)
  15. arch/s390/kernel/compat_signal.c (+1, -5)
  16. arch/s390/kernel/crash_dump.c (+27, -10)
  17. arch/s390/kernel/debug.c (+31, -9)
  18. arch/s390/kernel/early.c (+3, -19)
  19. arch/s390/kernel/entry.S (+88, -71)
  20. arch/s390/kernel/entry.h (+14, -3)
  21. arch/s390/kernel/entry64.S (+80, -59)
  22. arch/s390/kernel/ipl.c (+58, -41)
  23. arch/s390/kernel/irq.c (+5, -9)
  24. arch/s390/kernel/lgr.c (+200, -0)
  25. arch/s390/kernel/machine_kexec.c (+14, -38)
  26. arch/s390/kernel/nmi.c (+0, -2)
  27. arch/s390/kernel/os_info.c (+169, -0)
  28. arch/s390/kernel/process.c (+1, -6)
  29. arch/s390/kernel/setup.c (+30, -31)
  30. arch/s390/kernel/signal.c (+1, -5)
  31. arch/s390/kernel/smp.c (+473, -372)
  32. arch/s390/kernel/switch_cpu.S (+0, -58)
  33. arch/s390/kernel/switch_cpu64.S (+0, -51)
  34. arch/s390/kernel/swsusp_asm64.S (+11, -8)
  35. arch/s390/kernel/time.c (+2, -2)
  36. arch/s390/kernel/topology.c (+4, -4)
  37. arch/s390/kernel/traps.c (+4, -2)
  38. arch/s390/kernel/vdso.c (+7, -21)
  39. arch/s390/kernel/vtime.c (+35, -133)
  40. arch/s390/kvm/interrupt.c (+3, -3)
  41. arch/s390/lib/delay.c (+14, -17)
  42. arch/s390/lib/spinlock.c (+8, -22)
  43. arch/s390/mm/fault.c (+2, -2)
  44. arch/s390/oprofile/hwsampler.c (+3, -3)
  45. drivers/crypto/Kconfig (+0, -9)
  46. drivers/s390/block/dasd.c (+4, -0)
  47. drivers/s390/block/dasd_diag.c (+4, -4)
  48. drivers/s390/block/dasd_eckd.c (+8, -0)
  49. drivers/s390/char/sclp.c (+2, -2)
  50. drivers/s390/char/sclp_quiesce.c (+0, -1)
  51. drivers/s390/char/sclp_sdias.c (+80, -21)
  52. drivers/s390/char/zcore.c (+0, -1)
  53. drivers/s390/cio/cio.c (+0, -2)
  54. drivers/s390/cio/qdio_main.c (+6, -0)
  55. drivers/s390/crypto/Makefile (+0, -10)
  56. drivers/s390/crypto/ap_bus.c (+0, -2)
  57. drivers/s390/crypto/zcrypt_api.c (+0, -2)
  58. drivers/s390/crypto/zcrypt_cex2a.c (+0, -4)
  59. drivers/s390/crypto/zcrypt_mono.c (+0, -100)
  60. drivers/s390/crypto/zcrypt_pcica.c (+0, -4)
  61. drivers/s390/crypto/zcrypt_pcicc.c (+0, -4)
  62. drivers/s390/crypto/zcrypt_pcixcc.c (+0, -4)
  63. drivers/s390/kvm/kvm_virtio.c (+2, -4)
  64. net/iucv/iucv.c (+1, -1)

+ 1 - 8
arch/s390/include/asm/cputime.h

@@ -170,24 +170,17 @@ struct s390_idle_data {
 	unsigned int sequence;
 	unsigned long long idle_count;
 	unsigned long long idle_enter;
+	unsigned long long idle_exit;
 	unsigned long long idle_time;
 	int nohz_delay;
 };
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
 
-void vtime_start_cpu(__u64 int_clock, __u64 enter_timer);
 cputime64_t s390_get_idle_time(int cpu);
 
 #define arch_idle_time(cpu) s390_get_idle_time(cpu)
 
-static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
-				   __u64 enter_timer)
-{
-	if (regs->psw.mask & PSW_MASK_WAIT)
-		vtime_start_cpu(int_clock, enter_timer);
-}
-
 static inline int s390_nohz_delay(int cpu)
 {
 	return __get_cpu_var(s390_idle).nohz_delay != 0;

+ 1 - 0
arch/s390/include/asm/debug.h

@@ -131,6 +131,7 @@ void debug_unregister(debug_info_t* id);
 
 void debug_set_level(debug_info_t* id, int new_level);
 
+void debug_set_critical(void);
 void debug_stop_all(void);
 
 static inline debug_entry_t*

+ 1 - 0
arch/s390/include/asm/hardirq.h

@@ -18,6 +18,7 @@
 
 #define __ARCH_IRQ_STAT
 #define __ARCH_HAS_DO_SOFTIRQ
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED
 
 #define HARDIRQ_BITS	8
 

+ 1 - 0
arch/s390/include/asm/ipl.h

@@ -169,5 +169,6 @@ enum diag308_rc {
 extern int diag308(unsigned long subcode, void *addr);
 extern void diag308_reset(void);
 extern void store_status(void);
+extern void lgr_info_log(void);
 
 #endif /* _ASM_S390_IPL_H */

+ 6 - 1
arch/s390/include/asm/irq.h

@@ -34,7 +34,12 @@ enum interruption_class {
 	NR_IRQS,
 };
 
-typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
+struct ext_code {
+	unsigned short subcode;
+	unsigned short code;
+};
+
+typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
 
 int register_external_interrupt(u16 code, ext_int_handler_t handler);
 int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
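
For context, a minimal sketch of a consumer adapted to the new signature (the handler name is hypothetical; 0x1202 is the external-call interruption code the smp code registers for):

	static void my_ext_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
	{
		/* ext_code.code carries the low halfword of the old 32-bit
		 * value, ext_code.subcode the high halfword */
		if (ext_code.code != 0x1202)
			return;
		/* ... handle the interrupt ... */
	}

	/* registration itself is unchanged */
	register_external_interrupt(0x1202, my_ext_handler);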

+ 65 - 54
arch/s390/include/asm/lowcore.h

@@ -1,5 +1,5 @@
 /*
- *    Copyright IBM Corp. 1999,2010
+ *    Copyright IBM Corp. 1999,2012
  *    Author(s): Hartmut Penner <hp@de.ibm.com>,
  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *		 Denis Joseph Barrow,
@@ -12,14 +12,6 @@
 #include <asm/ptrace.h>
 #include <asm/cpu.h>
 
-void restart_int_handler(void);
-void ext_int_handler(void);
-void system_call(void);
-void pgm_check_handler(void);
-void mcck_int_handler(void);
-void io_int_handler(void);
-void psw_restart_int_handler(void);
-
 #ifdef CONFIG_32BIT
 
 #define LC_ORDER 0
@@ -56,7 +48,7 @@ struct _lowcore {
 	psw_t	mcck_new_psw;			/* 0x0070 */
 	psw_t	io_new_psw;			/* 0x0078 */
 	__u32	ext_params;			/* 0x0080 */
-	__u16	cpu_addr;			/* 0x0084 */
+	__u16	ext_cpu_addr;			/* 0x0084 */
 	__u16	ext_int_code;			/* 0x0086 */
 	__u16	svc_ilc;			/* 0x0088 */
 	__u16	svc_code;			/* 0x008a */
@@ -117,32 +109,37 @@ struct _lowcore {
 	__u64	steal_timer;			/* 0x0288 */
 	__u64	last_update_timer;		/* 0x0290 */
 	__u64	last_update_clock;		/* 0x0298 */
+	__u64	int_clock;			/* 0x02a0 */
+	__u64	mcck_clock;			/* 0x02a8 */
+	__u64	clock_comparator;		/* 0x02b0 */
 
 	/* Current process. */
-	__u32	current_task;			/* 0x02a0 */
-	__u32	thread_info;			/* 0x02a4 */
-	__u32	kernel_stack;			/* 0x02a8 */
+	__u32	current_task;			/* 0x02b8 */
+	__u32	thread_info;			/* 0x02bc */
+	__u32	kernel_stack;			/* 0x02c0 */
+
+	/* Interrupt, panic and restart stack. */
+	__u32	async_stack;			/* 0x02c4 */
+	__u32	panic_stack;			/* 0x02c8 */
+	__u32	restart_stack;			/* 0x02cc */
 
-	/* Interrupt and panic stack. */
-	__u32	async_stack;			/* 0x02ac */
-	__u32	panic_stack;			/* 0x02b0 */
+	/* Restart function and parameter. */
+	__u32	restart_fn;			/* 0x02d0 */
+	__u32	restart_data;			/* 0x02d4 */
+	__u32	restart_source;			/* 0x02d8 */
 
 	/* Address space pointer. */
-	__u32	kernel_asce;			/* 0x02b4 */
-	__u32	user_asce;			/* 0x02b8 */
-	__u32	current_pid;			/* 0x02bc */
+	__u32	kernel_asce;			/* 0x02dc */
+	__u32	user_asce;			/* 0x02e0 */
+	__u32	current_pid;			/* 0x02e4 */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x02c0 */
-	__u32	softirq_pending;		/* 0x02c4 */
-	__u32	percpu_offset;			/* 0x02c8 */
-	__u32	ext_call_fast;			/* 0x02cc */
-	__u64	int_clock;			/* 0x02d0 */
-	__u64	mcck_clock;			/* 0x02d8 */
-	__u64	clock_comparator;		/* 0x02e0 */
-	__u32	machine_flags;			/* 0x02e8 */
-	__u32	ftrace_func;			/* 0x02ec */
-	__u8	pad_0x02f8[0x0300-0x02f0];	/* 0x02f0 */
+	__u32	cpu_nr;				/* 0x02e8 */
+	__u32	softirq_pending;		/* 0x02ec */
+	__u32	percpu_offset;			/* 0x02f0 */
+	__u32	machine_flags;			/* 0x02f4 */
+	__u32	ftrace_func;			/* 0x02f8 */
+	__u8	pad_0x02fc[0x0300-0x02fc];	/* 0x02fc */
 
 	/* Interrupt response block */
 	__u8	irb[64];			/* 0x0300 */
@@ -157,7 +154,9 @@ struct _lowcore {
 	__u32	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e04 */
 	__u32	vmcore_info;			/* 0x0e08 */
-	__u8	pad_0x0e0c[0x0f00-0x0e0c];	/* 0x0e0c */
+	__u8	pad_0x0e0c[0x0e18-0x0e0c];	/* 0x0e0c */
+	__u32	os_info;			/* 0x0e18 */
+	__u8	pad_0x0e1c[0x0f00-0x0e1c];	/* 0x0e1c */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];		/* 0x0f00 */
@@ -189,7 +188,7 @@ struct _lowcore {
 	__u32	ipl_parmblock_ptr;		/* 0x0014 */
 	__u8	pad_0x0018[0x0080-0x0018];	/* 0x0018 */
 	__u32	ext_params;			/* 0x0080 */
-	__u16	cpu_addr;			/* 0x0084 */
+	__u16	ext_cpu_addr;			/* 0x0084 */
 	__u16	ext_int_code;			/* 0x0086 */
 	__u16	svc_ilc;			/* 0x0088 */
 	__u16	svc_code;			/* 0x008a */
@@ -254,34 +253,39 @@ struct _lowcore {
 	__u64	steal_timer;			/* 0x02e0 */
 	__u64	last_update_timer;		/* 0x02e8 */
 	__u64	last_update_clock;		/* 0x02f0 */
+	__u64	int_clock;			/* 0x02f8 */
+	__u64	mcck_clock;			/* 0x0300 */
+	__u64	clock_comparator;		/* 0x0308 */
 
 	/* Current process. */
-	__u64	current_task;			/* 0x02f8 */
-	__u64	thread_info;			/* 0x0300 */
-	__u64	kernel_stack;			/* 0x0308 */
+	__u64	current_task;			/* 0x0310 */
+	__u64	thread_info;			/* 0x0318 */
+	__u64	kernel_stack;			/* 0x0320 */
+
+	/* Interrupt, panic and restart stack. */
+	__u64	async_stack;			/* 0x0328 */
+	__u64	panic_stack;			/* 0x0330 */
+	__u64	restart_stack;			/* 0x0338 */
 
-	/* Interrupt and panic stack. */
-	__u64	async_stack;			/* 0x0310 */
-	__u64	panic_stack;			/* 0x0318 */
+	/* Restart function and parameter. */
+	__u64	restart_fn;			/* 0x0340 */
+	__u64	restart_data;			/* 0x0348 */
+	__u64	restart_source;			/* 0x0350 */
 
 	/* Address space pointer. */
-	__u64	kernel_asce;			/* 0x0320 */
-	__u64	user_asce;			/* 0x0328 */
-	__u64	current_pid;			/* 0x0330 */
+	__u64	kernel_asce;			/* 0x0358 */
+	__u64	user_asce;			/* 0x0360 */
+	__u64	current_pid;			/* 0x0368 */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x0338 */
-	__u32	softirq_pending;		/* 0x033c */
-	__u64	percpu_offset;			/* 0x0340 */
-	__u64	ext_call_fast;			/* 0x0348 */
-	__u64	int_clock;			/* 0x0350 */
-	__u64	mcck_clock;			/* 0x0358 */
-	__u64	clock_comparator;		/* 0x0360 */
-	__u64	vdso_per_cpu_data;		/* 0x0368 */
-	__u64	machine_flags;			/* 0x0370 */
-	__u64	ftrace_func;			/* 0x0378 */
-	__u64	gmap;				/* 0x0380 */
-	__u8	pad_0x0388[0x0400-0x0388];	/* 0x0388 */
+	__u32	cpu_nr;				/* 0x0370 */
+	__u32	softirq_pending;		/* 0x0374 */
+	__u64	percpu_offset;			/* 0x0378 */
+	__u64	vdso_per_cpu_data;		/* 0x0380 */
+	__u64	machine_flags;			/* 0x0388 */
+	__u64	ftrace_func;			/* 0x0390 */
+	__u64	gmap;				/* 0x0398 */
+	__u8	pad_0x03a0[0x0400-0x03a0];	/* 0x03a0 */
 
 	/* Interrupt response block. */
 	__u8	irb[64];			/* 0x0400 */
@@ -298,8 +302,15 @@ struct _lowcore {
 	 */
 	__u64	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e08 */
-	__u64	vmcore_info;			/* 0x0e0c */
-	__u8	pad_0x0e14[0x0f00-0x0e14];	/* 0x0e14 */
+	/*
+	 * Because the vmcore_info pointer is not 8 byte aligned it never
+	 * should be accessed directly. For accessing the pointer, first
+	 * copy it to a local pointer variable.
+	 */
+	__u8	vmcore_info[8];			/* 0x0e0c */
+	__u8	pad_0x0e14[0x0e18-0x0e14];	/* 0x0e14 */
+	__u64	os_info;			/* 0x0e18 */
+	__u8	pad_0x0e20[0x0f00-0x0e20];	/* 0x0e20 */
 
 	/* Extended facility list */
 	__u64	stfle_fac_list[32];		/* 0x0f00 */

+ 50 - 0
arch/s390/include/asm/os_info.h

@@ -0,0 +1,50 @@
+/*
+ * OS info memory interface
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+#ifndef _ASM_S390_OS_INFO_H
+#define _ASM_S390_OS_INFO_H
+
+#define OS_INFO_VERSION_MAJOR	1
+#define OS_INFO_VERSION_MINOR	1
+#define OS_INFO_MAGIC		0x4f53494e464f535aULL /* OSINFOSZ */
+
+#define OS_INFO_VMCOREINFO	0
+#define OS_INFO_REIPL_BLOCK	1
+#define OS_INFO_INIT_FN		2
+
+struct os_info_entry {
+	u64	addr;
+	u64	size;
+	u32	csum;
+} __packed;
+
+struct os_info {
+	u64	magic;
+	u32	csum;
+	u16	version_major;
+	u16	version_minor;
+	u64	crashkernel_addr;
+	u64	crashkernel_size;
+	struct os_info_entry entry[3];
+	u8	reserved[4004];
+} __packed;
+
+void os_info_init(void);
+void os_info_entry_add(int nr, void *ptr, u64 len);
+void os_info_crashkernel_add(unsigned long base, unsigned long size);
+u32 os_info_csum(struct os_info *os_info);
+
+#ifdef CONFIG_CRASH_DUMP
+void *os_info_old_entry(int nr, unsigned long *size);
+int copy_from_oldmem(void *dest, void *src, size_t count);
+#else
+static inline void *os_info_old_entry(int nr, unsigned long *size)
+{
+	return NULL;
+}
+#endif
+
+#endif /* _ASM_S390_OS_INFO_H */
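
A hedged usage sketch of this interface, mirroring how ipl.c and crash_dump.c use it later in this series (variable names are illustrative): the running kernel publishes a buffer, and after a crash the dump kernel gets back a checksummed copy:

	/* running kernel: publish the current reipl block */
	os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block, reipl_block->hdr.len);

	/* dump kernel (CONFIG_CRASH_DUMP): retrieve the old kernel's copy */
	unsigned long size;
	void *old = os_info_old_entry(OS_INFO_REIPL_BLOCK, &size);

	if (old)	/* NULL if the entry is missing or its checksum is bad */
		memcpy(reipl_block, old, size);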

+ 0 - 132
arch/s390/include/asm/sigp.h

@@ -1,132 +0,0 @@
-/*
- *  Routines and structures for signalling other processors.
- *
- *    Copyright IBM Corp. 1999,2010
- *    Author(s): Denis Joseph Barrow,
- *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
- *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
- */
-
-#ifndef __ASM_SIGP_H
-#define __ASM_SIGP_H
-
-#include <asm/system.h>
-
-/* Get real cpu address from logical cpu number. */
-extern unsigned short __cpu_logical_map[];
-
-static inline int cpu_logical_map(int cpu)
-{
-#ifdef CONFIG_SMP
-	return __cpu_logical_map[cpu];
-#else
-	return stap();
-#endif
-}
-
-enum {
-	sigp_sense = 1,
-	sigp_external_call = 2,
-	sigp_emergency_signal = 3,
-	sigp_start = 4,
-	sigp_stop = 5,
-	sigp_restart = 6,
-	sigp_stop_and_store_status = 9,
-	sigp_initial_cpu_reset = 11,
-	sigp_cpu_reset = 12,
-	sigp_set_prefix = 13,
-	sigp_store_status_at_address = 14,
-	sigp_store_extended_status_at_address = 15,
-	sigp_set_architecture = 18,
-	sigp_conditional_emergency_signal = 19,
-	sigp_sense_running = 21,
-};
-
-enum {
-	sigp_order_code_accepted = 0,
-	sigp_status_stored = 1,
-	sigp_busy = 2,
-	sigp_not_operational = 3,
-};
-
-/*
- * Definitions for external call.
- */
-enum {
-	ec_schedule = 0,
-	ec_call_function,
-	ec_call_function_single,
-	ec_stop_cpu,
-};
-
-/*
- * Signal processor.
- */
-static inline int raw_sigp(u16 cpu, int order)
-{
-	register unsigned long reg1 asm ("1") = 0;
-	int ccode;
-
-	asm volatile(
-		"	sigp	%1,%2,0(%3)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		:	"=d"	(ccode)
-		: "d" (reg1), "d" (cpu),
-		  "a" (order) : "cc" , "memory");
-	return ccode;
-}
-
-/*
- * Signal processor with parameter.
- */
-static inline int raw_sigp_p(u32 parameter, u16 cpu, int order)
-{
-	register unsigned int reg1 asm ("1") = parameter;
-	int ccode;
-
-	asm volatile(
-		"	sigp	%1,%2,0(%3)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (ccode)
-		: "d" (reg1), "d" (cpu),
-		  "a" (order) : "cc" , "memory");
-	return ccode;
-}
-
-/*
- * Signal processor with parameter and return status.
- */
-static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order)
-{
-	register unsigned int reg1 asm ("1") = parm;
-	int ccode;
-
-	asm volatile(
-		"	sigp	%1,%2,0(%3)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (ccode), "+d" (reg1)
-		: "d" (cpu), "a" (order)
-		: "cc" , "memory");
-	*status = reg1;
-	return ccode;
-}
-
-static inline int sigp(int cpu, int order)
-{
-	return raw_sigp(cpu_logical_map(cpu), order);
-}
-
-static inline int sigp_p(u32 parameter, int cpu, int order)
-{
-	return raw_sigp_p(parameter, cpu_logical_map(cpu), order);
-}
-
-static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order)
-{
-	return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order);
-}
-
-#endif /* __ASM_SIGP_H */

+ 22 - 41
arch/s390/include/asm/smp.h

@@ -1,5 +1,5 @@
 /*
- *    Copyright IBM Corp. 1999,2009
+ *    Copyright IBM Corp. 1999,2012
  *    Author(s): Denis Joseph Barrow,
  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
@@ -10,71 +10,52 @@
 #ifdef CONFIG_SMP
 
 #include <asm/system.h>
-#include <asm/sigp.h>
-
-extern void machine_restart_smp(char *);
-extern void machine_halt_smp(void);
-extern void machine_power_off_smp(void);
 
 #define raw_smp_processor_id()	(S390_lowcore.cpu_nr)
 
-extern int __cpu_disable (void);
-extern void __cpu_die (unsigned int cpu);
-extern int __cpu_up (unsigned int cpu);
-
 extern struct mutex smp_cpu_state_mutex;
+extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
+
+extern int __cpu_up(unsigned int cpu);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
-extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
-
-extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *);
-extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
-			      int from, int to);
-extern void smp_restart_with_online_cpu(void);
-extern void smp_restart_cpu(void);
+extern void smp_call_online_cpu(void (*func)(void *), void *);
+extern void smp_call_ipl_cpu(void (*func)(void *), void *);
 
-/*
- * returns 1 if (virtual) cpu is scheduled
- * returns 0 otherwise
- */
-static inline int smp_vcpu_scheduled(int cpu)
-{
-	u32 status;
-
-	switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
-	case sigp_status_stored:
-		/* Check for running status */
-		if (status & 0x400)
-			return 0;
-		break;
-	case sigp_not_operational:
-		return 0;
-	default:
-		break;
-	}
-	return 1;
-}
+extern int smp_find_processor_id(u16 address);
+extern int smp_store_status(int cpu);
+extern int smp_vcpu_scheduled(int cpu);
+extern void smp_yield_cpu(int cpu);
+extern void smp_yield(void);
+extern void smp_stop_cpu(void);
 
 #else /* CONFIG_SMP */
 
-static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
+static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
 	func(data);
 }
 
-static inline void smp_restart_with_online_cpu(void)
+static inline void smp_call_online_cpu(void (*func)(void *), void *data)
 {
+	func(data);
 }
 
-#define smp_vcpu_scheduled	(1)
+static inline int smp_find_processor_id(int address) { return 0; }
+static inline int smp_vcpu_scheduled(int cpu) { return 1; }
+static inline void smp_yield_cpu(int cpu) { }
+static inline void smp_yield(void) { }
+static inline void smp_stop_cpu(void) { }
 
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
+extern void __cpu_die(unsigned int cpu);
+extern int __cpu_disable(void);
 #else
 static inline int smp_rescan_cpus(void) { return 0; }
 static inline void cpu_die(void) { }
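
The two new calls replace the old smp_switch_* interface. As the ipl.c hunks below show, a caller just hands over a function and an argument; on !CONFIG_SMP the inline stubs run the function directly (the function name here is hypothetical):

	static void __my_action(void *data)
	{
		/* executed on the IPL cpu */
	}

	smp_call_ipl_cpu(__my_action, NULL);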

+ 34 - 0
arch/s390/include/asm/system.h

@@ -7,8 +7,10 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H
 
+#include <linux/preempt.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
+#include <linux/string.h>
 #include <asm/types.h>
 #include <asm/ptrace.h>
 #include <asm/setup.h>
@@ -248,6 +250,38 @@ static inline int test_facility(unsigned long nr)
 	return (*ptr & (0x80 >> (nr & 7))) != 0;
 }
 
+/**
+ * stfle - Store facility list extended
+ * @stfle_fac_list: array where facility list can be stored
+ * @size: size of passed in array in double words
+ */
+static inline void stfle(u64 *stfle_fac_list, int size)
+{
+	unsigned long nr;
+
+	preempt_disable();
+	S390_lowcore.stfl_fac_list = 0;
+	asm volatile(
+		"	.insn s,0xb2b10000,0(0)\n" /* stfl */
+		"0:\n"
+		EX_TABLE(0b, 0b)
+		: "=m" (S390_lowcore.stfl_fac_list));
+	nr = 4; /* bytes stored by stfl */
+	memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
+	if (S390_lowcore.stfl_fac_list & 0x01000000) {
+		/* More facility bits available with stfle */
+		register unsigned long reg0 asm("0") = size - 1;
+
+		asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
+			     : "+d" (reg0)
+			     : "a" (stfle_fac_list)
+			     : "memory", "cc");
+		nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
+	}
+	memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+	preempt_enable();
+}
+
 static inline unsigned short stap(void)
 {
 	unsigned short cpu_address;
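
Usage matches the callers added in this series (early.c below and the new lgr.c): pass an array and its length in doublewords; any tail the machine does not fill is zeroed:

	u64 fac_list[4];

	stfle(fac_list, ARRAY_SIZE(fac_list));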

+ 2 - 2
arch/s390/include/asm/timer.h

@@ -33,8 +33,8 @@ struct vtimer_queue {
 	spinlock_t lock;
 	__u64 timer;		/* last programmed timer */
 	__u64 elapsed;		/* elapsed time of timer expire values */
-	__u64 idle;		/* temp var for idle */
-	int do_spt;		/* =1: reprogram cpu timer in idle */
+	__u64 idle_enter;	/* cpu timer on idle enter */
+	__u64 idle_exit;	/* cpu timer on idle exit */
 };
 
 extern void init_virt_timer(struct vtimer_list *timer);

+ 2 - 2
arch/s390/include/asm/vdso.h

@@ -40,8 +40,8 @@ struct vdso_per_cpu_data {
 extern struct vdso_data *vdso_data;
 
 #ifdef CONFIG_64BIT
-int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore);
-void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore);
+int vdso_alloc_per_cpu(struct _lowcore *lowcore);
+void vdso_free_per_cpu(struct _lowcore *lowcore);
 #endif
 
 #endif /* __ASSEMBLY__ */

+ 1 - 3
arch/s390/kernel/Makefile

@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 obj-y	:=  bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
 	    processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
 	    debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
-	    sysinfo.o jump_label.o
+	    sysinfo.o jump_label.o lgr.o os_info.o
 
 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -34,8 +34,6 @@ extra-y				+= $(if $(CONFIG_64BIT),head64.o,head31.o)
 obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SCHED_BOOK)	+= topology.o
-obj-$(CONFIG_SMP)		+= $(if $(CONFIG_64BIT),switch_cpu64.o, \
-							switch_cpu.o)
 obj-$(CONFIG_HIBERNATION)	+= suspend.o swsusp_asm64.o
 obj-$(CONFIG_AUDIT)		+= audit.o
 compat-obj-$(CONFIG_AUDIT)	+= compat_audit.o

+ 16 - 11
arch/s390/kernel/asm-offsets.c

@@ -8,9 +8,11 @@
 
 #include <linux/kbuild.h>
 #include <linux/sched.h>
+#include <asm/cputime.h>
+#include <asm/timer.h>
 #include <asm/vdso.h>
-#include <asm/sigp.h>
 #include <asm/pgtable.h>
+#include <asm/system.h>
 
 /*
  * Make sure that the compiler is new enough. We want a compiler that
@@ -70,15 +72,15 @@ int main(void)
 	DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
 	DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
 	BLANK();
-	/* constants for SIGP */
-	DEFINE(__SIGP_STOP, sigp_stop);
-	DEFINE(__SIGP_RESTART, sigp_restart);
-	DEFINE(__SIGP_SENSE, sigp_sense);
-	DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
-	BLANK();
+	/* idle data offsets */
+	DEFINE(__IDLE_ENTER, offsetof(struct s390_idle_data, idle_enter));
+	DEFINE(__IDLE_EXIT, offsetof(struct s390_idle_data, idle_exit));
+	/* vtimer queue offsets */
+	DEFINE(__VQ_IDLE_ENTER, offsetof(struct vtimer_queue, idle_enter));
+	DEFINE(__VQ_IDLE_EXIT, offsetof(struct vtimer_queue, idle_exit));
 	/* lowcore offsets */
 	DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
-	DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr));
+	DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
 	DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
 	DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
 	DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
@@ -95,20 +97,19 @@ int main(void)
 	DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
 	DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
 	DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
-	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
-	BLANK();
-	DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
 	DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
 	DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
 	DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
 	DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
 	DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
 	DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
+	DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
 	DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
 	DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
 	DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
 	DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
 	DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
+	BLANK();
 	DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
 	DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
 	DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
@@ -129,12 +130,16 @@ int main(void)
 	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
 	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
 	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
+	DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack));
+	DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
 	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
 	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
 	DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
+	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
+	BLANK();
 	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
 	DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
 	DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));

+ 1 - 5
arch/s390/kernel/compat_signal.c

@@ -581,7 +581,6 @@ give_sigsegv:
 int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
-	sigset_t blocked;
 	int ret;
 
 	/* Set up the stack frame */
@@ -591,10 +590,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		ret = setup_frame32(sig, ka, oldset, regs);
 	if (ret)
 		return ret;
-	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
-	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&blocked, sig);
-	set_current_blocked(&blocked);
+	block_sigmask(ka, sig);
 	return 0;
 }
 

+ 27 - 10
arch/s390/kernel/crash_dump.c

@@ -14,6 +14,7 @@
 #include <linux/bootmem.h>
 #include <linux/elf.h>
 #include <asm/ipl.h>
+#include <asm/os_info.h>
 
 #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
@@ -51,7 +52,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 /*
  * Copy memory from old kernel
  */
-static int copy_from_oldmem(void *dest, void *src, size_t count)
+int copy_from_oldmem(void *dest, void *src, size_t count)
 {
 	unsigned long copied = 0;
 	int rc;
@@ -224,28 +225,44 @@ static void *nt_prpsinfo(void *ptr)
 }
 
 /*
- * Initialize vmcoreinfo note (new kernel)
+ * Get vmcoreinfo using lowcore->vmcore_info (new kernel)
  */
-static void *nt_vmcoreinfo(void *ptr)
+static void *get_vmcoreinfo_old(unsigned long *size)
 {
 	char nt_name[11], *vmcoreinfo;
 	Elf64_Nhdr note;
 	void *addr;
 
 	if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
-		return ptr;
+		return NULL;
 	memset(nt_name, 0, sizeof(nt_name));
 	if (copy_from_oldmem(&note, addr, sizeof(note)))
-		return ptr;
+		return NULL;
 	if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
-		return ptr;
+		return NULL;
 	if (strcmp(nt_name, "VMCOREINFO") != 0)
-		return ptr;
-	vmcoreinfo = kzalloc_panic(note.n_descsz + 1);
+		return NULL;
+	vmcoreinfo = kzalloc_panic(note.n_descsz);
 	if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
+		return NULL;
+	*size = note.n_descsz;
+	return vmcoreinfo;
+}
+
+/*
+ * Initialize vmcoreinfo note (new kernel)
+ */
+static void *nt_vmcoreinfo(void *ptr)
+{
+	unsigned long size;
+	void *vmcoreinfo;
+
+	vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
+	if (!vmcoreinfo)
+		vmcoreinfo = get_vmcoreinfo_old(&size);
+	if (!vmcoreinfo)
 		return ptr;
-	vmcoreinfo[note.n_descsz + 1] = 0;
-	return nt_init(ptr, 0, vmcoreinfo, note.n_descsz, "VMCOREINFO");
+	return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
 }
 
 /*

+ 31 - 9
arch/s390/kernel/debug.c

@@ -2,8 +2,8 @@
  *  arch/s390/kernel/debug.c
  *   S/390 debug facility
  *
- *    Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
- *                             IBM Corporation
+ *    Copyright IBM Corp. 1999, 2012
+ *
  *    Author(s): Michael Holzheu (holzheu@de.ibm.com),
  *               Holger Smolinski (Holger.Smolinski@de.ibm.com)
  *
@@ -167,6 +167,7 @@ static debug_info_t *debug_area_last = NULL;
 static DEFINE_MUTEX(debug_mutex);
 
 static int initialized;
+static int debug_critical;
 
 static const struct file_operations debug_file_ops = {
 	.owner   = THIS_MODULE,
@@ -932,6 +933,11 @@ debug_stop_all(void)
 }
 
 
+void debug_set_critical(void)
+{
+	debug_critical = 1;
+}
+
 /*
  * debug_event_common:
  * - write debug entry with given size
@@ -945,7 +951,11 @@ debug_event_common(debug_info_t * id, int level, const void *buf, int len)
 
 	if (!debug_active || !id->areas)
 		return NULL;
-	spin_lock_irqsave(&id->lock, flags);
+	if (debug_critical) {
+		if (!spin_trylock_irqsave(&id->lock, flags))
+			return NULL;
+	} else
+		spin_lock_irqsave(&id->lock, flags);
 	active = get_active_entry(id);
 	memset(DEBUG_DATA(active), 0, id->buf_size);
 	memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
@@ -968,7 +978,11 @@ debug_entry_t
 
 	if (!debug_active || !id->areas)
 		return NULL;
-	spin_lock_irqsave(&id->lock, flags);
+	if (debug_critical) {
+		if (!spin_trylock_irqsave(&id->lock, flags))
+			return NULL;
+	} else
+		spin_lock_irqsave(&id->lock, flags);
 	active = get_active_entry(id);
 	memset(DEBUG_DATA(active), 0, id->buf_size);
 	memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
@@ -1013,7 +1027,11 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 		return NULL;
 	numargs=debug_count_numargs(string);
 
-	spin_lock_irqsave(&id->lock, flags);
+	if (debug_critical) {
+		if (!spin_trylock_irqsave(&id->lock, flags))
+			return NULL;
+	} else
+		spin_lock_irqsave(&id->lock, flags);
 	active = get_active_entry(id);
 	curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active);
 	va_start(ap,string);
@@ -1047,7 +1065,11 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 
 	numargs=debug_count_numargs(string);
 
-	spin_lock_irqsave(&id->lock, flags);
+	if (debug_critical) {
+		if (!spin_trylock_irqsave(&id->lock, flags))
+			return NULL;
+	} else
+		spin_lock_irqsave(&id->lock, flags);
 	active = get_active_entry(id);
 	curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active);
 	va_start(ap,string);
@@ -1428,10 +1450,10 @@ debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
 	rc += sprintf(out_buf + rc, "| ");
 	for (i = 0; i < id->buf_size; i++) {
 		unsigned char c = in_buf[i];
-		if (!isprint(c))
-			rc += sprintf(out_buf + rc, ".");
-		else
+		if (isascii(c) && isprint(c))
 			rc += sprintf(out_buf + rc, "%c", c);
+		else
+			rc += sprintf(out_buf + rc, ".");
 	}
 	rc += sprintf(out_buf + rc, "\n");
 	return rc;
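
Once debug_set_critical() has been called, every s390dbf writer switches from spin_lock_irqsave() to spin_trylock_irqsave() and silently drops the event if the lock is contended, so logging from a panic or restart path cannot deadlock on a lock the interrupted context already holds. A sketch with a hypothetical call site and buffer:

	debug_set_critical();	/* one-way switch: events may now be dropped */
	debug_event(my_dbf, 1, &data, sizeof(data));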

+ 3 - 19
arch/s390/kernel/early.c

@@ -29,6 +29,7 @@
 #include <asm/sysinfo.h>
 #include <asm/cpcmd.h>
 #include <asm/sclp.h>
+#include <asm/system.h>
 #include "entry.h"
 
 /*
@@ -262,25 +263,8 @@ static noinline __init void setup_lowcore_early(void)
 
 static noinline __init void setup_facility_list(void)
 {
-	unsigned long nr;
-
-	S390_lowcore.stfl_fac_list = 0;
-	asm volatile(
-		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
-		"0:\n"
-		EX_TABLE(0b,0b) : "=m" (S390_lowcore.stfl_fac_list));
-	memcpy(&S390_lowcore.stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
-	nr = 4;				/* # bytes stored by stfl */
-	if (test_facility(7)) {
-		/* More facility bits available with stfle */
-		register unsigned long reg0 asm("0") = MAX_FACILITY_BIT/64 - 1;
-		asm volatile(".insn s,0xb2b00000,%0" /* stfle */
-			     : "=m" (S390_lowcore.stfle_fac_list), "+d" (reg0)
-			     : : "cc");
-		nr = (reg0 + 1) * 8;	/* # bytes stored by stfle */
-	}
-	memset((char *) S390_lowcore.stfle_fac_list + nr, 0,
-	       MAX_FACILITY_BIT/8 - nr);
+	stfle(S390_lowcore.stfle_fac_list,
+	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
 }
 
 static noinline __init void setup_hpage(void)

+ 88 - 71
arch/s390/kernel/entry.S

@@ -2,7 +2,7 @@
  *  arch/s390/kernel/entry.S
  *    S390 low-level entry points.
  *
- *    Copyright (C) IBM Corp. 1999,2006
+ *    Copyright (C) IBM Corp. 1999,2012
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *		 Hartmut Penner (hp@de.ibm.com),
  *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -105,14 +105,14 @@ STACK_SIZE  = 1 << STACK_SHIFT
 
 	.macro	ADD64 high,low,timer
 	al	\high,\timer
-	al	\low,\timer+4
+	al	\low,4+\timer
 	brc	12,.+8
 	ahi	\high,1
 	.endm
 
 	.macro	SUB64 high,low,timer
 	sl	\high,\timer
-	sl	\low,\timer+4
+	sl	\low,4+\timer
 	brc	3,.+8
 	ahi	\high,-1
 	.endm
@@ -471,7 +471,6 @@ io_tif:
 	jnz	io_work			# there is work to do (signals etc.)
 io_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
-	ni	__LC_RETURN_PSW+1,0xfd	# clean wait state bit
 	stpt	__LC_EXIT_TIMER
 	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_PSW
@@ -606,12 +605,32 @@ ext_skip:
 	stm	%r8,%r9,__PT_PSW(%r11)
 	TRACE_IRQS_OFF
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r3,__LC_CPU_ADDRESS	# get cpu address + interruption code
+	l	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
 	l	%r4,__LC_EXT_PARAMS	# get external parameters
 	l	%r1,BASED(.Ldo_extint)
 	basr	%r14,%r1		# call do_extint
 	j	io_return
 
+/*
+ * Load idle PSW. The second "half" of this function is in cleanup_idle.
+ */
+ENTRY(psw_idle)
+	st	%r4,__SF_EMPTY(%r15)
+	basr	%r1,0
+	la	%r1,psw_idle_lpsw+4-.(%r1)
+	st	%r1,__SF_EMPTY+4(%r15)
+	oi	__SF_EMPTY+4(%r15),0x80
+	la	%r1,.Lvtimer_max-psw_idle_lpsw-4(%r1)
+	stck	__IDLE_ENTER(%r2)
+	ltr	%r5,%r5
+	stpt	__VQ_IDLE_ENTER(%r3)
+	jz	psw_idle_lpsw
+	spt	0(%r1)
+psw_idle_lpsw:
+	lpsw	__SF_EMPTY(%r15)
+	br	%r14
+psw_idle_end:
+
 __critical_end:
 
 /*
@@ -673,7 +692,6 @@ mcck_skip:
 	TRACE_IRQS_ON
 mcck_return:
 	mvc	__LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
-	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 	jno	0f
 	lm	%r0,%r15,__PT_R0(%r11)
@@ -691,77 +709,30 @@ mcck_panic:
 0:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	mcck_skip
 
-/*
- * Restart interruption handler, kick starter for additional CPUs
- */
-#ifdef CONFIG_SMP
-	__CPUINIT
-ENTRY(restart_int_handler)
-	basr	%r1,0
-restart_base:
-	spt	restart_vtime-restart_base(%r1)
-	stck	__LC_LAST_UPDATE_CLOCK
-	mvc	__LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
-	mvc	__LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
-	l	%r15,__LC_GPREGS_SAVE_AREA+60 # load ksp
-	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
-	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
-	lm	%r6,%r15,__SF_GPRS(%r15)# load registers from clone
-	l	%r1,__LC_THREAD_INFO
-	mvc	__LC_USER_TIMER(8),__TI_user_timer(%r1)
-	mvc	__LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
-	xc	__LC_STEAL_TIMER(8),__LC_STEAL_TIMER
-	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
-	basr	%r14,0
-	l	%r14,restart_addr-.(%r14)
-	basr	%r14,%r14		# call start_secondary
-restart_addr:
-	.long	start_secondary
-	.align	8
-restart_vtime:
-	.long	0x7fffffff,0xffffffff
-	.previous
-#else
-/*
- * If we do not run with SMP enabled, let the new CPU crash ...
- */
-ENTRY(restart_int_handler)
-	basr	%r1,0
-restart_base:
-	lpsw	restart_crash-restart_base(%r1)
-	.align	8
-restart_crash:
-	.long	0x000a0000,0x00000000
-restart_go:
-#endif
-
 #
 # PSW restart interrupt handler
 #
-ENTRY(psw_restart_int_handler)
+ENTRY(restart_int_handler)
 	st	%r15,__LC_SAVE_AREA_RESTART
-	basr	%r15,0
-0:	l	%r15,.Lrestart_stack-0b(%r15)	# load restart stack
-	l	%r15,0(%r15)
+	l	%r15,__LC_RESTART_STACK
 	ahi	%r15,-__PT_SIZE			# create pt_regs on stack
+	xc	0(__PT_SIZE,%r15),0(%r15)
 	stm	%r0,%r14,__PT_R0(%r15)
 	mvc	__PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
 	mvc	__PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
-	ahi	%r15,-STACK_FRAME_OVERHEAD
-	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	basr	%r14,0
-1:	l	%r14,.Ldo_restart-1b(%r14)
-	basr	%r14,%r14
-	basr	%r14,0				# load disabled wait PSW if
-2:	lpsw	restart_psw_crash-2b(%r14)	# do_restart returns
-	.align 4
-.Ldo_restart:
-	.long	do_restart
-.Lrestart_stack:
-	.long	restart_stack
-	.align 8
-restart_psw_crash:
-	.long	0x000a0000,0x00000000 + restart_psw_crash
+	ahi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
+	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
+	lm	%r1,%r3,__LC_RESTART_FN		# load fn, parm & source cpu
+	ltr	%r3,%r3				# test source cpu address
+	jm	1f				# negative -> skip source stop
+0:	sigp	%r4,%r3,1			# sigp sense to source cpu
+	brc	10,0b				# wait for status stored
+1:	basr	%r14,%r1			# call function
+	stap	__SF_EMPTY(%r15)		# store cpu address
+	lh	%r3,__SF_EMPTY(%r15)
+2:	sigp	%r4,%r3,5			# sigp stop to current cpu
+	brc	2,2b
+3:	j	3b
 
 	.section .kprobes.text, "ax"
 
@@ -795,6 +766,8 @@ cleanup_table:
 	.long	io_tif + 0x80000000
 	.long	io_restore + 0x80000000
 	.long	io_done + 0x80000000
+	.long	psw_idle + 0x80000000
+	.long	psw_idle_end + 0x80000000
 
 cleanup_critical:
 	cl	%r9,BASED(cleanup_table)	# system_call
@@ -813,6 +786,10 @@ cleanup_critical:
 	jl	cleanup_io_tif
 	cl	%r9,BASED(cleanup_table+28)	# io_done
 	jl	cleanup_io_restore
+	cl	%r9,BASED(cleanup_table+32)	# psw_idle
+	jl	0f
+	cl	%r9,BASED(cleanup_table+36)	# psw_idle_end
+	jl	cleanup_idle
 0:	br	%r14
 
 cleanup_system_call:
@@ -896,7 +873,6 @@ cleanup_io_restore:
 	jhe	0f
 	l	%r9,12(%r11)		# get saved r11 pointer to pt_regs
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
-	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
 	mvc	0(32,%r11),__PT_R8(%r9)
 	lm	%r0,%r7,__PT_R0(%r9)
 0:	lm	%r8,%r9,__LC_RETURN_PSW
@@ -904,11 +880,52 @@ cleanup_io_restore:
 cleanup_io_restore_insn:
 	.long	io_done - 4 + 0x80000000
 
+cleanup_idle:
+	# copy interrupt clock & cpu timer
+	mvc	__IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+	chi	%r11,__LC_SAVE_AREA_ASYNC
+	je	0f
+	mvc	__IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+0:	# check if stck has been executed
+	cl	%r9,BASED(cleanup_idle_insn)
+	jhe	1f
+	mvc	__IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
+	mvc	__VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
+	j	2f
+1:	# check if the cpu timer has been reprogrammed
+	ltr	%r5,%r5
+	jz	2f
+	spt	__VQ_IDLE_ENTER(%r3)
+2:	# account system time going idle
+	lm	%r9,%r10,__LC_STEAL_TIMER
+	ADD64	%r9,%r10,__IDLE_ENTER(%r2)
+	SUB64	%r9,%r10,__LC_LAST_UPDATE_CLOCK
+	stm	%r9,%r10,__LC_STEAL_TIMER
+	mvc	__LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+	lm	%r9,%r10,__LC_SYSTEM_TIMER
+	ADD64	%r9,%r10,__LC_LAST_UPDATE_TIMER
+	SUB64	%r9,%r10,__VQ_IDLE_ENTER(%r3)
+	stm	%r9,%r10,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+	# prepare return psw
+	n	%r8,BASED(cleanup_idle_wait)	# clear wait state bit
+	l	%r9,24(%r11)			# return from psw_idle
+	br	%r14
+cleanup_idle_insn:
+	.long	psw_idle_lpsw + 0x80000000
+cleanup_idle_wait:
+	.long	0xfffdffff
+
 /*
  * Integer constants
  */
 	.align	4
-.Lnr_syscalls:		.long	NR_syscalls
+.Lnr_syscalls:
+	.long	NR_syscalls
+.Lvtimer_max:
+	.quad	0x7fffffffffffffff
 
 /*
  * Symbol constants

+ 14 - 3
arch/s390/kernel/entry.h

@@ -4,11 +4,22 @@
 #include <linux/types.h>
 #include <linux/signal.h>
 #include <asm/ptrace.h>
-
+#include <asm/cputime.h>
+#include <asm/timer.h>
 
 extern void (*pgm_check_table[128])(struct pt_regs *);
 extern void *restart_stack;
 
+void system_call(void);
+void pgm_check_handler(void);
+void ext_int_handler(void);
+void io_int_handler(void);
+void mcck_int_handler(void);
+void restart_int_handler(void);
+void restart_call_handler(void);
+void psw_idle(struct s390_idle_data *, struct vtimer_queue *,
+	      unsigned long, int);
+
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
 
@@ -24,9 +35,9 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
 void do_notify_resume(struct pt_regs *regs);
 
-void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long);
+struct ext_code;
+void do_extint(struct pt_regs *regs, struct ext_code, unsigned int, unsigned long);
 void do_restart(void);
-int __cpuinit start_secondary(void *cpuvoid);
 void __init startup_init(void);
 void die(struct pt_regs *regs, const char *str);
 

+ 80 - 59
arch/s390/kernel/entry64.S

@@ -2,7 +2,7 @@
  *  arch/s390/kernel/entry64.S
  *    S390 low-level entry points.
  *
- *    Copyright (C) IBM Corp. 1999,2010
+ *    Copyright (C) IBM Corp. 1999,2012
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *		 Hartmut Penner (hp@de.ibm.com),
  *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -489,7 +489,6 @@ io_restore:
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
-	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
@@ -625,12 +624,30 @@ ext_skip:
 	TRACE_IRQS_OFF
 	lghi	%r1,4096
 	lgr	%r2,%r11		# pass pointer to pt_regs
-	llgf	%r3,__LC_CPU_ADDRESS	# get cpu address + interruption code
+	llgf	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
 	llgf	%r4,__LC_EXT_PARAMS	# get external parameter
 	lg	%r5,__LC_EXT_PARAMS2-4096(%r1)	# get 64 bit external parameter
 	brasl	%r14,do_extint
 	j	io_return
 
+/*
+ * Load idle PSW. The second "half" of this function is in cleanup_idle.
+ */
+ENTRY(psw_idle)
+	stg	%r4,__SF_EMPTY(%r15)
+	larl	%r1,psw_idle_lpsw+4
+	stg	%r1,__SF_EMPTY+8(%r15)
+	larl	%r1,.Lvtimer_max
+	stck	__IDLE_ENTER(%r2)
+	ltr	%r5,%r5
+	stpt	__VQ_IDLE_ENTER(%r3)
+	jz	psw_idle_lpsw
+	spt	0(%r1)
+psw_idle_lpsw:
+	lpswe	__SF_EMPTY(%r15)
+	br	%r14
+psw_idle_end:
+
 __critical_end:
 
 /*
@@ -696,7 +713,6 @@ mcck_return:
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
-	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 	jno	0f
 	stpt	__LC_EXIT_TIMER
@@ -713,68 +729,30 @@ mcck_panic:
 0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	mcck_skip
 
-/*
- * Restart interruption handler, kick starter for additional CPUs
- */
-#ifdef CONFIG_SMP
-	__CPUINIT
-ENTRY(restart_int_handler)
-	basr	%r1,0
-restart_base:
-	spt	restart_vtime-restart_base(%r1)
-	stck	__LC_LAST_UPDATE_CLOCK
-	mvc	__LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
-	mvc	__LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
-	lghi	%r10,__LC_GPREGS_SAVE_AREA
-	lg	%r15,120(%r10)		# load ksp
-	lghi	%r10,__LC_CREGS_SAVE_AREA
-	lctlg	%c0,%c15,0(%r10)	# get new ctl regs
-	lghi	%r10,__LC_AREGS_SAVE_AREA
-	lam	%a0,%a15,0(%r10)
-	lmg	%r6,%r15,__SF_GPRS(%r15)# load registers from clone
-	lg	%r1,__LC_THREAD_INFO
-	mvc	__LC_USER_TIMER(8),__TI_user_timer(%r1)
-	mvc	__LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
-	xc	__LC_STEAL_TIMER(8),__LC_STEAL_TIMER
-	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
-	brasl	%r14,start_secondary
-	.align	8
-restart_vtime:
-	.long	0x7fffffff,0xffffffff
-	.previous
-#else
-/*
- * If we do not run with SMP enabled, let the new CPU crash ...
- */
-ENTRY(restart_int_handler)
-	basr	%r1,0
-restart_base:
-	lpswe	restart_crash-restart_base(%r1)
-	.align 8
-restart_crash:
-	.long  0x000a0000,0x00000000,0x00000000,0x00000000
-restart_go:
-#endif
-
 #
 # PSW restart interrupt handler
 #
-ENTRY(psw_restart_int_handler)
+ENTRY(restart_int_handler)
 	stg	%r15,__LC_SAVE_AREA_RESTART
-	larl	%r15,restart_stack		# load restart stack
-	lg	%r15,0(%r15)
+	lg	%r15,__LC_RESTART_STACK
 	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
+	xc	0(__PT_SIZE,%r15),0(%r15)
 	stmg	%r0,%r14,__PT_R0(%r15)
 	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
 	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
-	aghi	%r15,-STACK_FRAME_OVERHEAD
-	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-	brasl	%r14,do_restart
-	larl	%r14,restart_psw_crash		# load disabled wait PSW if
-	lpswe	0(%r14)				# do_restart returns
-	.align 8
-restart_psw_crash:
-	.quad	0x0002000080000000,0x0000000000000000 + restart_psw_crash
+	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
+	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
+	lmg	%r1,%r3,__LC_RESTART_FN		# load fn, parm & source cpu
+	ltgr	%r3,%r3				# test source cpu address
+	jm	1f				# negative -> skip source stop
+0:	sigp	%r4,%r3,1			# sigp sense to source cpu
+	brc	10,0b				# wait for status stored
+1:	basr	%r14,%r1			# call function
+	stap	__SF_EMPTY(%r15)		# store cpu address
+	llgh	%r3,__SF_EMPTY(%r15)
+2:	sigp	%r4,%r3,5			# sigp stop to current cpu
+	brc	2,2b
+3:	j	3b
 
 	.section .kprobes.text, "ax"
 
@@ -808,6 +786,8 @@ cleanup_table:
 	.quad	io_tif
 	.quad	io_restore
 	.quad	io_done
+	.quad	psw_idle
+	.quad	psw_idle_end
 
 cleanup_critical:
 	clg	%r9,BASED(cleanup_table)	# system_call
@@ -826,6 +806,10 @@ cleanup_critical:
 	jl	cleanup_io_tif
 	clg	%r9,BASED(cleanup_table+56)	# io_done
 	jl	cleanup_io_restore
+	clg	%r9,BASED(cleanup_table+64)	# psw_idle
+	jl	0f
+	clg	%r9,BASED(cleanup_table+72)	# psw_idle_end
+	jl	cleanup_idle
 0:	br	%r14
 
 
@@ -915,7 +899,6 @@ cleanup_io_restore:
 	je	0f
 	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
-	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
 0:	lmg	%r8,%r9,__LC_RETURN_PSW
@@ -923,6 +906,42 @@ cleanup_io_restore:
 cleanup_io_restore_insn:
 	.quad	io_done - 4
 
+cleanup_idle:
+	# copy interrupt clock & cpu timer
+	mvc	__IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+	cghi	%r11,__LC_SAVE_AREA_ASYNC
+	je	0f
+	mvc	__IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+0:	# check if stck & stpt have been executed
+	clg	%r9,BASED(cleanup_idle_insn)
+	jhe	1f
+	mvc	__IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
+	mvc	__VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
+	j	2f
+1:	# check if the cpu timer has been reprogrammed
+	ltr	%r5,%r5
+	jz	2f
+	spt	__VQ_IDLE_ENTER(%r3)
+2:	# account system time going idle
+	lg	%r9,__LC_STEAL_TIMER
+	alg	%r9,__IDLE_ENTER(%r2)
+	slg	%r9,__LC_LAST_UPDATE_CLOCK
+	stg	%r9,__LC_STEAL_TIMER
+	mvc	__LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+	lg	%r9,__LC_SYSTEM_TIMER
+	alg	%r9,__LC_LAST_UPDATE_TIMER
+	slg	%r9,__VQ_IDLE_ENTER(%r3)
+	stg	%r9,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+	# prepare return psw
+	nihh	%r8,0xfffd		# clear wait state bit
+	lg	%r9,48(%r11)		# return from psw_idle
+	br	%r14
+cleanup_idle_insn:
+	.quad	psw_idle_lpsw
+
 /*
  * Integer constants
  */
@@ -931,6 +950,8 @@ cleanup_io_restore_insn:
 	.quad	__critical_start
 .Lcritical_length:
 	.quad	__critical_end - __critical_start
+.Lvtimer_max:
+	.quad	0x7fffffffffffffff
 
 
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)

+ 58 - 41
arch/s390/kernel/ipl.c

@@ -2,7 +2,7 @@
  *  arch/s390/kernel/ipl.c
  *    ipl/reipl/dump support for Linux on s390.
  *
- *    Copyright IBM Corp. 2005,2007
+ *    Copyright IBM Corp. 2005,2012
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  *		 Volker Sameske <sameske@de.ibm.com>
@@ -17,6 +17,7 @@
 #include <linux/fs.h>
 #include <linux/gfp.h>
 #include <linux/crash_dump.h>
+#include <linux/debug_locks.h>
 #include <asm/ipl.h>
 #include <asm/smp.h>
 #include <asm/setup.h>
@@ -25,8 +26,9 @@
 #include <asm/ebcdic.h>
 #include <asm/reset.h>
 #include <asm/sclp.h>
-#include <asm/sigp.h>
 #include <asm/checksum.h>
+#include <asm/debug.h>
+#include <asm/os_info.h>
 #include "entry.h"
 
 #define IPL_PARM_BLOCK_VERSION 0
@@ -571,7 +573,7 @@ static void __ipl_run(void *unused)
 
 static void ipl_run(struct shutdown_trigger *trigger)
 {
-	smp_switch_to_ipl_cpu(__ipl_run, NULL);
+	smp_call_ipl_cpu(__ipl_run, NULL);
 }
 
 static int __init ipl_init(void)
@@ -950,6 +952,13 @@ static struct attribute_group reipl_nss_attr_group = {
 	.attrs = reipl_nss_attrs,
 };
 
+static void set_reipl_block_actual(struct ipl_parameter_block *reipl_block)
+{
+	reipl_block_actual = reipl_block;
+	os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual,
+			  reipl_block->hdr.len);
+}
+
 /* reipl type */
 
 static int reipl_set_type(enum ipl_type type)
@@ -965,7 +974,7 @@ static int reipl_set_type(enum ipl_type type)
 			reipl_method = REIPL_METHOD_CCW_VM;
 		else
 			reipl_method = REIPL_METHOD_CCW_CIO;
-		reipl_block_actual = reipl_block_ccw;
+		set_reipl_block_actual(reipl_block_ccw);
 		break;
 	case IPL_TYPE_FCP:
 		if (diag308_set_works)
@@ -974,7 +983,7 @@ static int reipl_set_type(enum ipl_type type)
 			reipl_method = REIPL_METHOD_FCP_RO_VM;
 		else
 			reipl_method = REIPL_METHOD_FCP_RO_DIAG;
-		reipl_block_actual = reipl_block_fcp;
+		set_reipl_block_actual(reipl_block_fcp);
 		break;
 	case IPL_TYPE_FCP_DUMP:
 		reipl_method = REIPL_METHOD_FCP_DUMP;
@@ -984,7 +993,7 @@ static int reipl_set_type(enum ipl_type type)
 			reipl_method = REIPL_METHOD_NSS_DIAG;
 		else
 			reipl_method = REIPL_METHOD_NSS;
-		reipl_block_actual = reipl_block_nss;
+		set_reipl_block_actual(reipl_block_nss);
 		break;
 	case IPL_TYPE_UNKNOWN:
 		reipl_method = REIPL_METHOD_DEFAULT;
@@ -1101,7 +1110,7 @@ static void __reipl_run(void *unused)
 
 static void reipl_run(struct shutdown_trigger *trigger)
 {
-	smp_switch_to_ipl_cpu(__reipl_run, NULL);
+	smp_call_ipl_cpu(__reipl_run, NULL);
 }
 
 static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
@@ -1256,6 +1265,29 @@ static int __init reipl_fcp_init(void)
 	return 0;
 }
 
+static int __init reipl_type_init(void)
+{
+	enum ipl_type reipl_type = ipl_info.type;
+	struct ipl_parameter_block *reipl_block;
+	unsigned long size;
+
+	reipl_block = os_info_old_entry(OS_INFO_REIPL_BLOCK, &size);
+	if (!reipl_block)
+		goto out;
+	/*
+	 * If we have an OS info reipl block, this will be used
+	 */
+	if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_FCP) {
+		memcpy(reipl_block_fcp, reipl_block, size);
+		reipl_type = IPL_TYPE_FCP;
+	} else if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_CCW) {
+		memcpy(reipl_block_ccw, reipl_block, size);
+		reipl_type = IPL_TYPE_CCW;
+	}
+out:
+	return reipl_set_type(reipl_type);
+}
+
 static int __init reipl_init(void)
 {
 	int rc;
@@ -1277,10 +1309,7 @@ static int __init reipl_init(void)
 	rc = reipl_nss_init();
 	if (rc)
 		return rc;
-	rc = reipl_set_type(ipl_info.type);
-	if (rc)
-		return rc;
-	return 0;
+	return reipl_type_init();
 }
 
 static struct shutdown_action __refdata reipl_action = {
@@ -1421,7 +1450,7 @@ static void dump_run(struct shutdown_trigger *trigger)
 	if (dump_method == DUMP_METHOD_NONE)
 		return;
 	smp_send_stop();
-	smp_switch_to_ipl_cpu(__dump_run, NULL);
+	smp_call_ipl_cpu(__dump_run, NULL);
 }
 
 static int __init dump_ccw_init(void)
@@ -1499,30 +1528,12 @@ static struct shutdown_action __refdata dump_action = {
 
 static void dump_reipl_run(struct shutdown_trigger *trigger)
 {
-	preempt_disable();
-	/*
-	 * Bypass dynamic address translation (DAT) when storing IPL parameter
-	 * information block address and checksum into the prefix area
-	 * (corresponding to absolute addresses 0-8191).
-	 * When enhanced DAT applies and the STE format control in one,
-	 * the absolute address is formed without prefixing. In this case a
-	 * normal store (stg/st) into the prefix area would no more match to
-	 * absolute addresses 0-8191.
-	 */
-#ifdef CONFIG_64BIT
-	asm volatile("sturg %0,%1"
-		:: "a" ((unsigned long) reipl_block_actual),
-		"a" (&lowcore_ptr[smp_processor_id()]->ipib));
-#else
-	asm volatile("stura %0,%1"
-		:: "a" ((unsigned long) reipl_block_actual),
-		"a" (&lowcore_ptr[smp_processor_id()]->ipib));
-#endif
-	asm volatile("stura %0,%1"
-		:: "a" (csum_partial(reipl_block_actual,
-				     reipl_block_actual->hdr.len, 0)),
-		"a" (&lowcore_ptr[smp_processor_id()]->ipib_checksum));
-	preempt_enable();
+	u32 csum;
+
+	csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
+	copy_to_absolute_zero(&S390_lowcore.ipib_checksum, &csum, sizeof(csum));
+	copy_to_absolute_zero(&S390_lowcore.ipib, &reipl_block_actual,
+			      sizeof(reipl_block_actual));
 	dump_run(trigger);
 }
 
@@ -1623,9 +1634,7 @@ static void stop_run(struct shutdown_trigger *trigger)
 	if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
 	    strcmp(trigger->name, ON_RESTART_STR) == 0)
 		disabled_wait((unsigned long) __builtin_return_address(0));
-	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
-		cpu_relax();
-	for (;;);
+	smp_stop_cpu();
 }
 
 static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
@@ -1713,6 +1722,7 @@ static struct kobj_attribute on_panic_attr =
 
 static void do_panic(void)
 {
+	lgr_info_log();
 	on_panic_trigger.action->fn(&on_panic_trigger);
 	stop_run(&on_panic_trigger);
 }
@@ -1738,9 +1748,8 @@ static ssize_t on_restart_store(struct kobject *kobj,
 static struct kobj_attribute on_restart_attr =
 	__ATTR(on_restart, 0644, on_restart_show, on_restart_store);
 
-void do_restart(void)
+static void __do_restart(void *ignore)
 {
-	smp_restart_with_online_cpu();
 	smp_send_stop();
 #ifdef CONFIG_CRASH_DUMP
 	crash_kexec(NULL);
@@ -1749,6 +1758,14 @@ void do_restart(void)
 	stop_run(&on_restart_trigger);
 }
 
+void do_restart(void)
+{
+	tracing_off();
+	debug_locks_off();
+	lgr_info_log();
+	smp_call_online_cpu(__do_restart, NULL);
+}
+
 /* on halt */
 
 static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};

+ 5 - 9
arch/s390/kernel/irq.c

@@ -202,31 +202,27 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
 }
 EXPORT_SYMBOL(unregister_external_interrupt);
 
-void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
+void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
 			   unsigned int param32, unsigned long param64)
 {
 	struct pt_regs *old_regs;
-	unsigned short code;
 	struct ext_int_info *p;
 	int index;
 
-	code = (unsigned short) ext_int_code;
 	old_regs = set_irq_regs(regs);
-	s390_idle_check(regs, S390_lowcore.int_clock,
-			S390_lowcore.async_enter_timer);
 	irq_enter();
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
 		/* Serve timer interrupts first. */
 		clock_comparator_work();
 	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
-	if (code != 0x1004)
+	if (ext_code.code != 0x1004)
 		__get_cpu_var(s390_idle).nohz_delay = 1;
 
-	index = ext_hash(code);
+	index = ext_hash(ext_code.code);
 	rcu_read_lock();
 	list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
-		if (likely(p->code == code))
-			p->handler(ext_int_code, param32, param64);
+		if (likely(p->code == ext_code.code))
+			p->handler(ext_code, param32, param64);
 	rcu_read_unlock();
 	irq_exit();
 	set_irq_regs(old_regs);

+ 200 - 0
arch/s390/kernel/lgr.c

@@ -0,0 +1,200 @@
+/*
+ * Linux Guest Relocation (LGR) detection
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <asm/sysinfo.h>
+#include <asm/ebcdic.h>
+#include <asm/system.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+
+#define LGR_TIMER_INTERVAL_SECS (30 * 60)
+#define VM_LEVEL_MAX 2 /* Maximum is 8, but we only record two levels */
+
+/*
+ * LGR info: Contains stfle and stsi data
+ */
+struct lgr_info {
+	/* Bit field with facility information: 4 DWORDs are stored */
+	u64 stfle_fac_list[4];
+	/* Level of system (1 = CEC, 2 = LPAR, 3 = z/VM) */
+	u32 level;
+	/* Level 1: CEC info (stsi 1.1.1) */
+	char manufacturer[16];
+	char type[4];
+	char sequence[16];
+	char plant[4];
+	char model[16];
+	/* Level 2: LPAR info (stsi 2.2.2) */
+	u16 lpar_number;
+	char name[8];
+	/* Level 3: VM info (stsi 3.2.2) */
+	u8 vm_count;
+	struct {
+		char name[8];
+		char cpi[16];
+	} vm[VM_LEVEL_MAX];
+} __packed __aligned(8);
+
+/*
+ * LGR globals
+ */
+static void *lgr_page;
+static struct lgr_info lgr_info_last;
+static struct lgr_info lgr_info_cur;
+static struct debug_info *lgr_dbf;
+
+/*
+ * Return number of valid stsi levels
+ */
+static inline int stsi_0(void)
+{
+	int rc = stsi(NULL, 0, 0, 0);
+
+	return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
+}
+
+/*
+ * Copy buffer and then convert it to ASCII
+ */
+static void cpascii(char *dst, char *src, int size)
+{
+	memcpy(dst, src, size);
+	EBCASC(dst, size);
+}
+
+/*
+ * Fill LGR info with 1.1.1 stsi data
+ */
+static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
+{
+	struct sysinfo_1_1_1 *si = lgr_page;
+
+	if (stsi(si, 1, 1, 1) == -ENOSYS)
+		return;
+	cpascii(lgr_info->manufacturer, si->manufacturer,
+		sizeof(si->manufacturer));
+	cpascii(lgr_info->type, si->type, sizeof(si->type));
+	cpascii(lgr_info->model, si->model, sizeof(si->model));
+	cpascii(lgr_info->sequence, si->sequence, sizeof(si->sequence));
+	cpascii(lgr_info->plant, si->plant, sizeof(si->plant));
+}
+
+/*
+ * Fill LGR info with 2.2.2 stsi data
+ */
+static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
+{
+	struct sysinfo_2_2_2 *si = lgr_page;
+
+	if (stsi(si, 2, 2, 2) == -ENOSYS)
+		return;
+	cpascii(lgr_info->name, si->name, sizeof(si->name));
+	memcpy(&lgr_info->lpar_number, &si->lpar_number,
+	       sizeof(lgr_info->lpar_number));
+}
+
+/*
+ * Fill LGR info with 3.2.2 stsi data
+ */
+static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
+{
+	struct sysinfo_3_2_2 *si = lgr_page;
+	int i;
+
+	if (stsi(si, 3, 2, 2) == -ENOSYS)
+		return;
+	for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) {
+		cpascii(lgr_info->vm[i].name, si->vm[i].name,
+			sizeof(si->vm[i].name));
+		cpascii(lgr_info->vm[i].cpi, si->vm[i].cpi,
+			sizeof(si->vm[i].cpi));
+	}
+	lgr_info->vm_count = si->count;
+}
+
+/*
+ * Fill LGR info with current data
+ */
+static void lgr_info_get(struct lgr_info *lgr_info)
+{
+	memset(lgr_info, 0, sizeof(*lgr_info));
+	stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list));
+	lgr_info->level = stsi_0();
+	if (lgr_info->level == -ENOSYS)
+		return;
+	if (lgr_info->level >= 1)
+		lgr_stsi_1_1_1(lgr_info);
+	if (lgr_info->level >= 2)
+		lgr_stsi_2_2_2(lgr_info);
+	if (lgr_info->level >= 3)
+		lgr_stsi_3_2_2(lgr_info);
+}
+
+/*
+ * Check if LGR info has changed and, if so, log the new LGR info to s390dbf
+ */
+void lgr_info_log(void)
+{
+	static DEFINE_SPINLOCK(lgr_info_lock);
+	unsigned long flags;
+
+	if (!spin_trylock_irqsave(&lgr_info_lock, flags))
+		return;
+	lgr_info_get(&lgr_info_cur);
+	if (memcmp(&lgr_info_last, &lgr_info_cur, sizeof(lgr_info_cur)) != 0) {
+		debug_event(lgr_dbf, 1, &lgr_info_cur, sizeof(lgr_info_cur));
+		lgr_info_last = lgr_info_cur;
+	}
+	spin_unlock_irqrestore(&lgr_info_lock, flags);
+}
+EXPORT_SYMBOL_GPL(lgr_info_log);
+
+static void lgr_timer_set(void);
+
+/*
+ * LGR timer callback
+ */
+static void lgr_timer_fn(unsigned long ignored)
+{
+	lgr_info_log();
+	lgr_timer_set();
+}
+
+static struct timer_list lgr_timer =
+	TIMER_DEFERRED_INITIALIZER(lgr_timer_fn, 0, 0);
+
+/*
+ * Setup next LGR timer
+ */
+static void lgr_timer_set(void)
+{
+	mod_timer(&lgr_timer, jiffies + LGR_TIMER_INTERVAL_SECS * HZ);
+}
+
+/*
+ * Initialize LGR: Add s390dbf, write initial lgr_info and setup timer
+ */
+static int __init lgr_init(void)
+{
+	lgr_page = (void *) __get_free_pages(GFP_KERNEL, 0);
+	if (!lgr_page)
+		return -ENOMEM;
+	lgr_dbf = debug_register("lgr", 1, 1, sizeof(struct lgr_info));
+	if (!lgr_dbf) {
+		free_page((unsigned long) lgr_page);
+		return -ENOMEM;
+	}
+	debug_register_view(lgr_dbf, &debug_hex_ascii_view);
+	lgr_info_get(&lgr_info_last);
+	debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last));
+	lgr_timer_set();
+	return 0;
+}
+module_init(lgr_init);
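
The heart of the new file is lgr_info_log(): take a fresh snapshot, compare it against the last one recorded, and write a debug event only on change, with a trylock so that concurrent callers (the deferred timer versus the panic and restart paths) never block each other. A user-space sketch of that pattern, with pthread_mutex_trylock() standing in for spin_trylock_irqsave() and stubs for the stsi/stfle sampling and the s390dbf write:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct snapshot {
	unsigned int level;
	char name[8];
};

static struct snapshot last, cur;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for lgr_info_get(): stsi/stfle sampling in the kernel. */
static void get_snapshot(struct snapshot *s)
{
	memset(s, 0, sizeof(*s));
	s->level = 3;
	memcpy(s->name, "ZVMGUEST", 8);
}

/* Stand-in for debug_event() on the "lgr" s390dbf area. */
static void log_event(const struct snapshot *s)
{
	printf("environment changed, now level %u\n", s->level);
}

void info_log(void)
{
	if (pthread_mutex_trylock(&lock))
		return;		/* another caller is already logging */
	get_snapshot(&cur);
	if (memcmp(&last, &cur, sizeof(cur)) != 0) {
		log_event(&cur);
		last = cur;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	info_log();	/* differs from the zeroed 'last': logged */
	info_log();	/* unchanged: silent */
	return 0;
}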

+ 14 - 38
arch/s390/kernel/machine_kexec.c

@@ -14,6 +14,7 @@
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/ftrace.h>
+#include <linux/debug_locks.h>
 #include <asm/cio.h>
 #include <asm/setup.h>
 #include <asm/pgtable.h>
@@ -48,51 +49,22 @@ static void add_elf_notes(int cpu)
 	memset(ptr, 0, sizeof(struct elf_note));
 }
 
-/*
- * Store status of next available physical CPU
- */
-static int store_status_next(int start_cpu, int this_cpu)
-{
-	struct save_area *sa = (void *) 4608 + store_prefix();
-	int cpu, rc;
-
-	for (cpu = start_cpu; cpu < 65536; cpu++) {
-		if (cpu == this_cpu)
-			continue;
-		do {
-			rc = raw_sigp(cpu, sigp_stop_and_store_status);
-		} while (rc == sigp_busy);
-		if (rc != sigp_order_code_accepted)
-			continue;
-		if (sa->pref_reg)
-			return cpu;
-	}
-	return -1;
-}
-
 /*
  * Initialize CPU ELF notes
  */
 void setup_regs(void)
 {
 	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
-	int cpu, this_cpu, phys_cpu = 0, first = 1;
+	int cpu, this_cpu;
 
-	this_cpu = stap();
-
-	if (!S390_lowcore.prefixreg_save_area)
-		first = 0;
+	this_cpu = smp_find_processor_id(stap());
+	add_elf_notes(this_cpu);
 	for_each_online_cpu(cpu) {
-		if (first) {
-			add_elf_notes(cpu);
-			first = 0;
+		if (cpu == this_cpu)
+			continue;
+		if (smp_store_status(cpu))
 			continue;
-		}
-		phys_cpu = store_status_next(phys_cpu, this_cpu);
-		if (phys_cpu == -1)
-			break;
 		add_elf_notes(cpu);
-		phys_cpu++;
 	}
 	/* Copy dump CPU store status info to absolute zero */
 	memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
@@ -238,10 +210,14 @@ static void __machine_kexec(void *data)
 	struct kimage *image = data;
 
 	pfault_fini();
-	if (image->type == KEXEC_TYPE_CRASH)
+	tracing_off();
+	debug_locks_off();
+	if (image->type == KEXEC_TYPE_CRASH) {
+		lgr_info_log();
 		s390_reset_system(__do_machine_kdump, data);
-	else
+	} else {
 		s390_reset_system(__do_machine_kexec, data);
+	}
 	disabled_wait((unsigned long) __builtin_return_address(0));
 }
 
@@ -255,5 +231,5 @@ void machine_kexec(struct kimage *image)
 		return;
 	tracer_disable();
 	smp_send_stop();
-	smp_switch_to_ipl_cpu(__machine_kexec, image);
+	smp_call_ipl_cpu(__machine_kexec, image);
 }
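
setup_regs() becomes a straightforward walk over the logical CPU map: record ELF notes for the executing CPU first, then ask every other online CPU to store its status, skipping any CPU that does not respond. A small sketch of that collection loop, with store_status()/add_notes() as stand-ins for smp_store_status() and add_elf_notes():

#include <stdio.h>

#define NR_CPUS 4

static int online[NR_CPUS] = { 1, 1, 1, 1 };

/* Stand-in for smp_store_status(); pretend CPU 2 fails to respond. */
static int store_status(int cpu)
{
	return cpu == 2 ? -1 : 0;
}

static void add_notes(int cpu)
{
	printf("ELF notes recorded for cpu %d\n", cpu);
}

void collect_notes(int this_cpu)
{
	int cpu;

	add_notes(this_cpu);	/* the executing CPU first */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!online[cpu] || cpu == this_cpu)
			continue;
		if (store_status(cpu))
			continue;	/* no status: skip its notes */
		add_notes(cpu);
	}
}

int main(void)
{
	collect_notes(0);
	return 0;
}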

+ 0 - 2
arch/s390/kernel/nmi.c

@@ -254,8 +254,6 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	int umode;
 
 	nmi_enter();
-	s390_idle_check(regs, S390_lowcore.mcck_clock,
-			S390_lowcore.mcck_enter_timer);
 	kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
 	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
 	mcck = &__get_cpu_var(cpu_mcck);

+ 169 - 0
arch/s390/kernel/os_info.c

@@ -0,0 +1,169 @@
+/*
+ * OS info memory interface
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "os_info"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/crash_dump.h>
+#include <linux/kernel.h>
+#include <asm/checksum.h>
+#include <asm/lowcore.h>
+#include <asm/system.h>
+#include <asm/os_info.h>
+
+/*
+ * OS info structure has to be page aligned
+ */
+static struct os_info os_info __page_aligned_data;
+
+/*
+ * Compute checksum over OS info structure
+ */
+u32 os_info_csum(struct os_info *os_info)
+{
+	int size = sizeof(*os_info) - offsetof(struct os_info, version_major);
+	return csum_partial(&os_info->version_major, size, 0);
+}
+
+/*
+ * Add crashkernel info to OS info and update checksum
+ */
+void os_info_crashkernel_add(unsigned long base, unsigned long size)
+{
+	os_info.crashkernel_addr = (u64)(unsigned long)base;
+	os_info.crashkernel_size = (u64)(unsigned long)size;
+	os_info.csum = os_info_csum(&os_info);
+}
+
+/*
+ * Add OS info entry and update checksum
+ */
+void os_info_entry_add(int nr, void *ptr, u64 size)
+{
+	os_info.entry[nr].addr = (u64)(unsigned long)ptr;
+	os_info.entry[nr].size = size;
+	os_info.entry[nr].csum = csum_partial(ptr, size, 0);
+	os_info.csum = os_info_csum(&os_info);
+}
+
+/*
+ * Initialize OS info structure and set lowcore pointer
+ */
+void __init os_info_init(void)
+{
+	void *ptr = &os_info;
+
+	os_info.version_major = OS_INFO_VERSION_MAJOR;
+	os_info.version_minor = OS_INFO_VERSION_MINOR;
+	os_info.magic = OS_INFO_MAGIC;
+	os_info.csum = os_info_csum(&os_info);
+	copy_to_absolute_zero(&S390_lowcore.os_info, &ptr, sizeof(ptr));
+}
+
+#ifdef CONFIG_CRASH_DUMP
+
+static struct os_info *os_info_old;
+
+/*
+ * Allocate and copy OS info entry from oldmem
+ */
+static void os_info_old_alloc(int nr, int align)
+{
+	unsigned long addr, size = 0;
+	char *buf, *buf_align, *msg;
+	u32 csum;
+
+	addr = os_info_old->entry[nr].addr;
+	if (!addr) {
+		msg = "not available";
+		goto fail;
+	}
+	size = os_info_old->entry[nr].size;
+	buf = kmalloc(size + align - 1, GFP_KERNEL);
+	if (!buf) {
+		msg = "alloc failed";
+		goto fail;
+	}
+	buf_align = PTR_ALIGN(buf, align);
+	if (copy_from_oldmem(buf_align, (void *) addr, size)) {
+		msg = "copy failed";
+		goto fail_free;
+	}
+	csum = csum_partial(buf_align, size, 0);
+	if (csum != os_info_old->entry[nr].csum) {
+		msg = "checksum failed";
+		goto fail_free;
+	}
+	os_info_old->entry[nr].addr = (u64)(unsigned long)buf_align;
+	msg = "copied";
+	goto out;
+fail_free:
+	kfree(buf);
+fail:
+	os_info_old->entry[nr].addr = 0;
+out:
+	pr_info("entry %i: %s (addr=0x%lx size=%lu)\n",
+		nr, msg, addr, size);
+}
+
+/*
+ * Initialize os info and os info entries from oldmem
+ */
+static void os_info_old_init(void)
+{
+	static int os_info_init;
+	unsigned long addr;
+
+	if (os_info_init)
+		return;
+	if (!OLDMEM_BASE)
+		goto fail;
+	if (copy_from_oldmem(&addr, &S390_lowcore.os_info, sizeof(addr)))
+		goto fail;
+	if (addr == 0 || addr % PAGE_SIZE)
+		goto fail;
+	os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL);
+	if (!os_info_old)
+		goto fail;
+	if (copy_from_oldmem(os_info_old, (void *) addr, sizeof(*os_info_old)))
+		goto fail_free;
+	if (os_info_old->magic != OS_INFO_MAGIC)
+		goto fail_free;
+	if (os_info_old->csum != os_info_csum(os_info_old))
+		goto fail_free;
+	if (os_info_old->version_major > OS_INFO_VERSION_MAJOR)
+		goto fail_free;
+	os_info_old_alloc(OS_INFO_VMCOREINFO, 1);
+	os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1);
+	os_info_old_alloc(OS_INFO_INIT_FN, PAGE_SIZE);
+	pr_info("crashkernel: addr=0x%lx size=%lu\n",
+		(unsigned long) os_info_old->crashkernel_addr,
+		(unsigned long) os_info_old->crashkernel_size);
+	os_info_init = 1;
+	return;
+fail_free:
+	kfree(os_info_old);
+fail:
+	os_info_init = 1;
+	os_info_old = NULL;
+}
+
+/*
+ * Return pointer to os info entry and its size
+ */
+void *os_info_old_entry(int nr, unsigned long *size)
+{
+	os_info_old_init();
+
+	if (!os_info_old)
+		return NULL;
+	if (!os_info_old->entry[nr].addr)
+		return NULL;
+	*size = (unsigned long) os_info_old->entry[nr].size;
+	return (void *)(unsigned long)os_info_old->entry[nr].addr;
+}
+#endif
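
os_info.c builds a self-validating structure: the checksum covers everything from version_major onward, so the csum field itself stays outside the covered range, and a kdump kernel that reads the structure back from old memory can verify the copy before trusting it. A sketch of that writer/reader round trip, assuming a trivial byte-sum in place of the kernel's csum_partial():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct os_info_demo {
	uint32_t csum;		/* not covered by the checksum */
	uint16_t version_major;	/* coverage starts here */
	uint16_t version_minor;
	uint64_t crashkernel_addr;
	uint64_t crashkernel_size;
};

/* Trivial stand-in for csum_partial(). */
static uint32_t csum_bytes(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

static uint32_t os_info_csum(const struct os_info_demo *oi)
{
	size_t off = offsetof(struct os_info_demo, version_major);

	return csum_bytes((const char *) oi + off, sizeof(*oi) - off);
}

int main(void)
{
	struct os_info_demo oi = { 0, 1, 0, 0x10000000, 0x8000000 };
	struct os_info_demo copy;

	oi.csum = os_info_csum(&oi);		/* writer side */
	memcpy(&copy, &oi, sizeof(copy));	/* "copy_from_oldmem" */
	printf("checksum %s\n",
	       copy.csum == os_info_csum(&copy) ? "ok" : "mismatch");
	return 0;
}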

+ 1 - 6
arch/s390/kernel/process.c

@@ -77,13 +77,8 @@ static void default_idle(void)
 		local_irq_enable();
 		return;
 	}
-	trace_hardirqs_on();
-	/* Don't trace preempt off for idle. */
-	stop_critical_timings();
-	/* Stop virtual timer and halt the cpu. */
+	/* Halt the cpu and keep track of cpu time accounting. */
 	vtime_stop_cpu();
-	/* Reenable preemption tracer. */
-	start_critical_timings();
 }
 
 void cpu_idle(void)

+ 30 - 31
arch/s390/kernel/setup.c

@@ -2,7 +2,7 @@
  *  arch/s390/kernel/setup.c
  *
  *  S390 version
- *    Copyright (C) IBM Corp. 1999,2010
+ *    Copyright (C) IBM Corp. 1999,2012
  *    Author(s): Hartmut Penner (hp@de.ibm.com),
  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
  *
@@ -62,6 +62,8 @@
 #include <asm/ebcdic.h>
 #include <asm/kvm_virtio.h>
 #include <asm/diag.h>
+#include <asm/os_info.h>
+#include "entry.h"
 
 long psw_kernel_bits	= PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
 			  PSW_MASK_EA | PSW_MASK_BA;
@@ -351,8 +353,9 @@ static void setup_addressing_mode(void)
 	}
 }
 
-static void __init
-setup_lowcore(void)
+void *restart_stack __attribute__((__section__(".data")));
+
+static void __init setup_lowcore(void)
 {
 	struct _lowcore *lc;
 
@@ -363,7 +366,7 @@ setup_lowcore(void)
 	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
 	lc->restart_psw.mask = psw_kernel_bits;
 	lc->restart_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
+		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
 	lc->external_new_psw.mask = psw_kernel_bits |
 		PSW_MASK_DAT | PSW_MASK_MCHECK;
 	lc->external_new_psw.addr =
@@ -412,6 +415,24 @@ setup_lowcore(void)
 	lc->last_update_timer = S390_lowcore.last_update_timer;
 	lc->last_update_clock = S390_lowcore.last_update_clock;
 	lc->ftrace_func = S390_lowcore.ftrace_func;
+
+	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+	restart_stack += ASYNC_SIZE;
+
+	/*
+	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
+	 * restart data to the absolute zero lowcore. This is necessary if
+	 * PSW restart is done on an offline CPU that has lowcore zero.
+	 */
+	lc->restart_stack = (unsigned long) restart_stack;
+	lc->restart_fn = (unsigned long) do_restart;
+	lc->restart_data = 0;
+	lc->restart_source = -1UL;
+	memcpy(&S390_lowcore.restart_stack, &lc->restart_stack,
+	       4*sizeof(unsigned long));
+	copy_to_absolute_zero(&S390_lowcore.restart_psw,
+			      &lc->restart_psw, sizeof(psw_t));
+
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
 }
@@ -572,27 +593,6 @@ static void __init setup_memory_end(void)
 	}
 }
 
-void *restart_stack __attribute__((__section__(".data")));
-
-/*
- * Setup new PSW and allocate stack for PSW restart interrupt
- */
-static void __init setup_restart_psw(void)
-{
-	psw_t psw;
-
-	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
-	restart_stack += ASYNC_SIZE;
-
-	/*
-	 * Setup restart PSW for absolute zero lowcore. This is necesary
-	 * if PSW restart is done on an offline CPU that has lowcore zero
-	 */
-	psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
-	psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
-	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
-}
-
 static void __init setup_vmcoreinfo(void)
 {
 #ifdef CONFIG_KEXEC
@@ -747,7 +747,7 @@ static void __init reserve_crashkernel(void)
 {
 #ifdef CONFIG_CRASH_DUMP
 	unsigned long long crash_base, crash_size;
-	char *msg;
+	char *msg = NULL;
 	int rc;
 
 	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
@@ -779,11 +779,11 @@ static void __init reserve_crashkernel(void)
 	pr_info("Reserving %lluMB of memory at %lluMB "
 		"for crashkernel (System RAM: %luMB)\n",
 		crash_size >> 20, crash_base >> 20, memory_end >> 20);
+	os_info_crashkernel_add(crash_base, crash_size);
 #endif
 }
 
-static void __init
-setup_memory(void)
+static void __init setup_memory(void)
 {
         unsigned long bootmap_size;
 	unsigned long start_pfn, end_pfn;
@@ -1014,8 +1014,7 @@ static void __init setup_hwcaps(void)
  * was printed.
  */
 
-void __init
-setup_arch(char **cmdline_p)
+void __init setup_arch(char **cmdline_p)
 {
         /*
          * print what head.S has found out about the machine
@@ -1060,6 +1059,7 @@ setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
+	os_info_init();
 	setup_ipl();
 	setup_memory_end();
 	setup_addressing_mode();
@@ -1068,7 +1068,6 @@ setup_arch(char **cmdline_p)
 	setup_memory();
 	setup_resources();
 	setup_vmcoreinfo();
-	setup_restart_psw();
 	setup_lowcore();
 
         cpu_init();

+ 1 - 5
arch/s390/kernel/signal.c

@@ -384,7 +384,6 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
 			 siginfo_t *info, sigset_t *oldset,
 			 struct pt_regs *regs)
 {
-	sigset_t blocked;
 	int ret;
 
 	/* Set up the stack frame */
@@ -394,10 +393,7 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
 		ret = setup_frame(sig, ka, oldset, regs);
 	if (ret)
 		return ret;
-	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
-	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&blocked, sig);
-	set_current_blocked(&blocked);
+	block_sigmask(ka, sig);
 	return 0;
 }
 

Diff content suppressed: too large to display
+ 473 - 372
arch/s390/kernel/smp.c


+ 0 - 58
arch/s390/kernel/switch_cpu.S

@@ -1,58 +0,0 @@
-/*
- * 31-bit switch cpu code
- *
- * Copyright IBM Corp. 2009
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/ptrace.h>
-
-# smp_switch_to_cpu switches to destination cpu and executes the passed function
-# Parameter: %r2 - function to call
-#	     %r3 - function parameter
-#	     %r4 - stack poiner
-#	     %r5 - current cpu
-#	     %r6 - destination cpu
-
-	.section .text
-ENTRY(smp_switch_to_cpu)
-	stm	%r6,%r15,__SF_GPRS(%r15)
-	lr	%r1,%r15
-	ahi	%r15,-STACK_FRAME_OVERHEAD
-	st	%r1,__SF_BACKCHAIN(%r15)
-	basr	%r13,0
-0:	la	%r1,.gprregs_addr-0b(%r13)
-	l	%r1,0(%r1)
-	stm	%r0,%r15,0(%r1)
-1:	sigp	%r0,%r6,__SIGP_RESTART	/* start destination CPU */
-	brc	2,1b			/* busy, try again */
-2:	sigp	%r0,%r5,__SIGP_STOP	/* stop current CPU */
-	brc	2,2b			/* busy, try again */
-3:	j	3b
-
-ENTRY(smp_restart_cpu)
-	basr	%r13,0
-0:	la	%r1,.gprregs_addr-0b(%r13)
-	l	%r1,0(%r1)
-	lm	%r0,%r15,0(%r1)
-1:	sigp	%r0,%r5,__SIGP_SENSE	/* Wait for calling CPU */
-	brc	10,1b			/* busy, accepted (status 0), running */
-	tmll	%r0,0x40		/* Test if calling CPU is stopped */
-	jz	1b
-	ltr	%r4,%r4			/* New stack ? */
-	jz	1f
-	lr	%r15,%r4
-1:	lr	%r14,%r2		/* r14: Function to call */
-	lr	%r2,%r3			/* r2 : Parameter for function*/
-	basr	%r14,%r14		/* Call function */
-
-.gprregs_addr:
-	.long	.gprregs
-
-	.section .data,"aw",@progbits
-.gprregs:
-	.rept	16
-	.long	0
-	.endr

+ 0 - 51
arch/s390/kernel/switch_cpu64.S

@@ -1,51 +0,0 @@
-/*
- * 64-bit switch cpu code
- *
- * Copyright IBM Corp. 2009
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/ptrace.h>
-
-# smp_switch_to_cpu switches to destination cpu and executes the passed function
-# Parameter: %r2 - function to call
-#	     %r3 - function parameter
-#	     %r4 - stack poiner
-#	     %r5 - current cpu
-#	     %r6 - destination cpu
-
-	.section .text
-ENTRY(smp_switch_to_cpu)
-	stmg	%r6,%r15,__SF_GPRS(%r15)
-	lgr	%r1,%r15
-	aghi	%r15,-STACK_FRAME_OVERHEAD
-	stg	%r1,__SF_BACKCHAIN(%r15)
-	larl	%r1,.gprregs
-	stmg	%r0,%r15,0(%r1)
-1:	sigp	%r0,%r6,__SIGP_RESTART	/* start destination CPU */
-	brc	2,1b			/* busy, try again */
-2:	sigp	%r0,%r5,__SIGP_STOP	/* stop current CPU */
-	brc	2,2b			/* busy, try again */
-3:	j	3b
-
-ENTRY(smp_restart_cpu)
-	larl	%r1,.gprregs
-	lmg	%r0,%r15,0(%r1)
-1:	sigp	%r0,%r5,__SIGP_SENSE	/* Wait for calling CPU */
-	brc	10,1b			/* busy, accepted (status 0), running */
-	tmll	%r0,0x40		/* Test if calling CPU is stopped */
-	jz	1b
-	ltgr	%r4,%r4			/* New stack ? */
-	jz	1f
-	lgr	%r15,%r4
-1:	lgr	%r14,%r2		/* r14: Function to call */
-	lgr	%r2,%r3			/* r2 : Parameter for function*/
-	basr	%r14,%r14		/* Call function */
-
-	.section .data,"aw",@progbits
-.gprregs:
-	.rept	16
-	.quad	0
-	.endr

+ 11 - 8
arch/s390/kernel/swsusp_asm64.S

@@ -42,7 +42,7 @@ ENTRY(swsusp_arch_suspend)
 	lghi	%r1,0x1000
 
 	/* Save CPU address */
-	stap	__LC_CPU_ADDRESS(%r0)
+	stap	__LC_EXT_CPU_ADDR(%r0)
 
 	/* Store registers */
 	mvc	0x318(4,%r1),__SF_EMPTY(%r15)	/* move prefix to lowcore */
@@ -173,15 +173,15 @@ pgm_check_entry:
 	larl	%r1,.Lresume_cpu		/* Resume CPU address: r2 */
 	stap	0(%r1)
 	llgh	%r2,0(%r1)
-	llgh	%r1,__LC_CPU_ADDRESS(%r0)	/* Suspend CPU address: r1 */
+	llgh	%r1,__LC_EXT_CPU_ADDR(%r0)	/* Suspend CPU address: r1 */
 	cgr	%r1,%r2
 	je	restore_registers		/* r1 = r2 -> nothing to do */
 	larl	%r4,.Lrestart_suspend_psw	/* Set new restart PSW */
 	mvc	__LC_RST_NEW_PSW(16,%r0),0(%r4)
 3:
-	sigp	%r9,%r1,__SIGP_INITIAL_CPU_RESET
-	brc	8,4f	/* accepted */
-	brc	2,3b	/* busy, try again */
+	sigp	%r9,%r1,11			/* sigp initial cpu reset */
+	brc	8,4f				/* accepted */
+	brc	2,3b				/* busy, try again */
 
 	/* Suspend CPU not available -> panic */
 	larl	%r15,init_thread_union
@@ -196,10 +196,10 @@ pgm_check_entry:
 	lpsw	0(%r3)
 4:
 	/* Switch to suspend CPU */
-	sigp	%r9,%r1,__SIGP_RESTART	/* start suspend CPU */
+	sigp	%r9,%r1,6		/* sigp restart to suspend CPU */
 	brc	2,4b			/* busy, try again */
 5:
-	sigp	%r9,%r2,__SIGP_STOP	/* stop resume (current) CPU */
+	sigp	%r9,%r2,5		/* sigp stop to current resume CPU */
 	brc	2,5b			/* busy, try again */
 6:	j	6b
 
@@ -207,7 +207,7 @@ restart_suspend:
 	larl	%r1,.Lresume_cpu
 	llgh	%r2,0(%r1)
 7:
-	sigp	%r9,%r2,__SIGP_SENSE	/* Wait for resume CPU */
+	sigp	%r9,%r2,1		/* sigp sense, wait for resume CPU */
 	brc	8,7b			/* accepted, status 0, still running */
 	brc	2,7b			/* busy, try again */
 	tmll	%r9,0x40		/* Test if resume CPU is stopped */
@@ -257,6 +257,9 @@ restore_registers:
 	lghi	%r2,0
 	brasl	%r14,arch_set_page_states
 
+	/* Log potential guest relocation */
+	brasl	%r14,lgr_info_log
+
 	/* Reinitialize the channel subsystem */
 	brasl	%r14,channel_subsystem_reinit
 

+ 2 - 2
arch/s390/kernel/time.c

@@ -165,7 +165,7 @@ void init_cpu_timer(void)
 	__ctl_set_bit(0, 4);
 }
 
-static void clock_comparator_interrupt(unsigned int ext_int_code,
+static void clock_comparator_interrupt(struct ext_code ext_code,
 				       unsigned int param32,
 				       unsigned long param64)
 {
@@ -177,7 +177,7 @@ static void clock_comparator_interrupt(unsigned int ext_int_code,
 static void etr_timing_alert(struct etr_irq_parm *);
 static void stp_timing_alert(struct stp_irq_parm *);
 
-static void timing_alert_interrupt(unsigned int ext_int_code,
+static void timing_alert_interrupt(struct ext_code ext_code,
 				   unsigned int param32, unsigned long param64)
 {
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;

+ 4 - 4
arch/s390/kernel/topology.c

@@ -79,12 +79,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 	     cpu < TOPOLOGY_CPU_BITS;
 	     cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
 	{
-		unsigned int rcpu, lcpu;
+		unsigned int rcpu;
+		int lcpu;
 
 		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
-		for_each_present_cpu(lcpu) {
-			if (cpu_logical_map(lcpu) != rcpu)
-				continue;
+		lcpu = smp_find_processor_id(rcpu);
+		if (lcpu >= 0) {
 			cpumask_set_cpu(lcpu, &book->mask);
 			cpu_book_id[lcpu] = book->id;
 			cpumask_set_cpu(lcpu, &core->mask);

+ 4 - 2
arch/s390/kernel/traps.c

@@ -41,6 +41,7 @@
 #include <asm/cpcmd.h>
 #include <asm/lowcore.h>
 #include <asm/debug.h>
+#include <asm/ipl.h>
 #include "entry.h"
 
 void (*pgm_check_table[128])(struct pt_regs *regs);
@@ -144,8 +145,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 	for (i = 0; i < kstack_depth_to_print; i++) {
 		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
 			break;
-		if (i && ((i * sizeof (long) % 32) == 0))
-			printk("\n       ");
+		if ((i * sizeof(long) % 32) == 0)
+			printk("%s       ", i == 0 ? "" : "\n");
 		printk(LONG, *stack++);
 	}
 	printk("\n");
@@ -239,6 +240,7 @@ void die(struct pt_regs *regs, const char *str)
 	static int die_counter;
 
 	oops_enter();
+	lgr_info_log();
 	debug_stop_all();
 	console_verbose();
 	spin_lock_irq(&die_lock);

+ 7 - 21
arch/s390/kernel/vdso.c

@@ -88,19 +88,12 @@ static void vdso_init_data(struct vdso_data *vd)
 }
 
 #ifdef CONFIG_64BIT
-/*
- * Setup per cpu vdso data page.
- */
-static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
-{
-}
-
 /*
  * Allocate/free per cpu vdso data.
  */
 #define SEGMENT_ORDER	2
 
-int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
+int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 {
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
@@ -139,7 +132,6 @@ int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
 	aste[4] = (u32)(addr_t) psal;
 	lowcore->vdso_per_cpu_data = page_frame;
 
-	vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
 	return 0;
 
 out:
@@ -149,7 +141,7 @@ out:
 	return -ENOMEM;
 }
 
-void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
+void vdso_free_per_cpu(struct _lowcore *lowcore)
 {
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
@@ -168,19 +160,15 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
 	free_pages(segment_table, SEGMENT_ORDER);
 }
 
-static void __vdso_init_cr5(void *dummy)
+static void vdso_init_cr5(void)
 {
 	unsigned long cr5;
 
+	if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+		return;
 	cr5 = offsetof(struct _lowcore, paste);
 	__ctl_load(cr5, 5, 5);
 }
-
-static void vdso_init_cr5(void)
-{
-	if (user_mode != HOME_SPACE_MODE && vdso_enabled)
-		on_each_cpu(__vdso_init_cr5, NULL, 1);
-}
 #endif /* CONFIG_64BIT */
 
 /*
@@ -322,10 +310,8 @@ static int __init vdso_init(void)
 	}
 	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
 	vdso64_pagelist[vdso64_pages] = NULL;
-#ifndef CONFIG_SMP
-	if (vdso_alloc_per_cpu(0, &S390_lowcore))
+	if (vdso_alloc_per_cpu(&S390_lowcore))
 		BUG();
-#endif
 	vdso_init_cr5();
 #endif /* CONFIG_64BIT */
 
@@ -335,7 +321,7 @@ static int __init vdso_init(void)
 
 	return 0;
 }
-arch_initcall(vdso_init);
+early_initcall(vdso_init);
 
 int in_gate_area_no_mm(unsigned long addr)
 {

+ 35 - 133
arch/s390/kernel/vtime.c

@@ -26,6 +26,7 @@
 #include <asm/irq_regs.h>
 #include <asm/cputime.h>
 #include <asm/irq.h>
+#include "entry.h"
 
 static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
 
@@ -123,153 +124,53 @@ void account_system_vtime(struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
-void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
+void __kprobes vtime_stop_cpu(void)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
-	__u64 idle_time, expires;
+	unsigned long long idle_time;
+	unsigned long psw_mask;
 
-	if (idle->idle_enter == 0ULL)
-		return;
+	trace_hardirqs_on();
+	/* Don't trace preempt off for idle. */
+	stop_critical_timings();
 
-	/* Account time spent with enabled wait psw loaded as idle time. */
-	idle_time = int_clock - idle->idle_enter;
-	account_idle_time(idle_time);
-	S390_lowcore.steal_timer +=
-		idle->idle_enter - S390_lowcore.last_update_clock;
-	S390_lowcore.last_update_clock = int_clock;
-
-	/* Account system time spent going idle. */
-	S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
-	S390_lowcore.last_update_timer = enter_timer;
-
-	/* Restart vtime CPU timer */
-	if (vq->do_spt) {
-		/* Program old expire value but first save progress. */
-		expires = vq->idle - enter_timer;
-		expires += get_vtimer();
-		set_vtimer(expires);
-	} else {
-		/* Don't account the CPU timer delta while the cpu was idle. */
-		vq->elapsed -= vq->idle - enter_timer;
-	}
+	/* Wait for external, I/O or machine check interrupt. */
+	psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
+		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+	idle->nohz_delay = 0;
 
+	/* Call the assembler magic in entry.S */
+	psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));
+
+	/* Reenable preemption tracer. */
+	start_critical_timings();
+
+	/* Account time spent with enabled wait psw loaded as idle time. */
 	idle->sequence++;
 	smp_wmb();
+	idle_time = idle->idle_exit - idle->idle_enter;
 	idle->idle_time += idle_time;
-	idle->idle_enter = 0ULL;
+	idle->idle_enter = idle->idle_exit = 0ULL;
 	idle->idle_count++;
+	account_idle_time(idle_time);
 	smp_wmb();
 	idle->sequence++;
 }
 
-void __kprobes vtime_stop_cpu(void)
-{
-	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
-	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
-	psw_t psw;
-
-	/* Wait for external, I/O or machine check interrupt. */
-	psw.mask = psw_kernel_bits | PSW_MASK_WAIT |
-		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
-
-	idle->nohz_delay = 0;
-
-	/* Check if the CPU timer needs to be reprogrammed. */
-	if (vq->do_spt) {
-		__u64 vmax = VTIMER_MAX_SLICE;
-		/*
-		 * The inline assembly is equivalent to
-		 *	vq->idle = get_cpu_timer();
-		 *	set_cpu_timer(VTIMER_MAX_SLICE);
-		 *	idle->idle_enter = get_clock();
-		 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
-		 *			   PSW_MASK_DAT | PSW_MASK_IO |
-		 *			   PSW_MASK_EXT | PSW_MASK_MCHECK);
-		 * The difference is that the inline assembly makes sure that
-		 * the last three instruction are stpt, stck and lpsw in that
-		 * order. This is done to increase the precision.
-		 */
-		asm volatile(
-#ifndef CONFIG_64BIT
-			"	basr	1,0\n"
-			"0:	ahi	1,1f-0b\n"
-			"	st	1,4(%2)\n"
-#else /* CONFIG_64BIT */
-			"	larl	1,1f\n"
-			"	stg	1,8(%2)\n"
-#endif /* CONFIG_64BIT */
-			"	stpt	0(%4)\n"
-			"	spt	0(%5)\n"
-			"	stck	0(%3)\n"
-#ifndef CONFIG_64BIT
-			"	lpsw	0(%2)\n"
-#else /* CONFIG_64BIT */
-			"	lpswe	0(%2)\n"
-#endif /* CONFIG_64BIT */
-			"1:"
-			: "=m" (idle->idle_enter), "=m" (vq->idle)
-			: "a" (&psw), "a" (&idle->idle_enter),
-			  "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw)
-			: "memory", "cc", "1");
-	} else {
-		/*
-		 * The inline assembly is equivalent to
-		 *	vq->idle = get_cpu_timer();
-		 *	idle->idle_enter = get_clock();
-		 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
-		 *			   PSW_MASK_DAT | PSW_MASK_IO |
-		 *			   PSW_MASK_EXT | PSW_MASK_MCHECK);
-		 * The difference is that the inline assembly makes sure that
-		 * the last three instruction are stpt, stck and lpsw in that
-		 * order. This is done to increase the precision.
-		 */
-		asm volatile(
-#ifndef CONFIG_64BIT
-			"	basr	1,0\n"
-			"0:	ahi	1,1f-0b\n"
-			"	st	1,4(%2)\n"
-#else /* CONFIG_64BIT */
-			"	larl	1,1f\n"
-			"	stg	1,8(%2)\n"
-#endif /* CONFIG_64BIT */
-			"	stpt	0(%4)\n"
-			"	stck	0(%3)\n"
-#ifndef CONFIG_64BIT
-			"	lpsw	0(%2)\n"
-#else /* CONFIG_64BIT */
-			"	lpswe	0(%2)\n"
-#endif /* CONFIG_64BIT */
-			"1:"
-			: "=m" (idle->idle_enter), "=m" (vq->idle)
-			: "a" (&psw), "a" (&idle->idle_enter),
-			  "a" (&vq->idle), "m" (psw)
-			: "memory", "cc", "1");
-	}
-}
-
 cputime64_t s390_get_idle_time(int cpu)
 {
-	struct s390_idle_data *idle;
-	unsigned long long now, idle_time, idle_enter;
+	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
+	unsigned long long now, idle_enter, idle_exit;
 	unsigned int sequence;
 
-	idle = &per_cpu(s390_idle, cpu);
-
-	now = get_clock();
-repeat:
-	sequence = idle->sequence;
-	smp_rmb();
-	if (sequence & 1)
-		goto repeat;
-	idle_time = 0;
-	idle_enter = idle->idle_enter;
-	if (idle_enter != 0ULL && idle_enter < now)
-		idle_time = now - idle_enter;
-	smp_rmb();
-	if (idle->sequence != sequence)
-		goto repeat;
-	return idle_time;
+	do {
+		now = get_clock();
+		sequence = ACCESS_ONCE(idle->sequence);
+		idle_enter = ACCESS_ONCE(idle->idle_enter);
+		idle_exit = ACCESS_ONCE(idle->idle_exit);
+	} while ((sequence & 1) || (idle->sequence != sequence));
+	return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
 }
 
 /*
@@ -319,7 +220,7 @@ static void do_callbacks(struct list_head *cb_list)
 /*
  * Handler for the virtual CPU timer.
  */
-static void do_cpu_timer_interrupt(unsigned int ext_int_code,
+static void do_cpu_timer_interrupt(struct ext_code ext_code,
 				   unsigned int param32, unsigned long param64)
 {
 	struct vtimer_queue *vq;
@@ -346,7 +247,6 @@ static void do_cpu_timer_interrupt(unsigned int ext_int_code,
 	}
 	spin_unlock(&vq->lock);
 
-	vq->do_spt = list_empty(&cb_list);
 	do_callbacks(&cb_list);
 
 	/* next event is first in list */
@@ -355,8 +255,7 @@ static void do_cpu_timer_interrupt(unsigned int ext_int_code,
 	if (!list_empty(&vq->list)) {
 		event = list_first_entry(&vq->list, struct vtimer_list, entry);
 		next = event->expires;
-	} else
-		vq->do_spt = 0;
+	}
 	spin_unlock(&vq->lock);
 	/*
 	 * To improve precision add the time spent by the
@@ -570,6 +469,9 @@ void init_cpu_vtimer(void)
 
 	/* enable cpu timer interrupts */
 	__ctl_set_bit(0,10);
+
+	/* set initial cpu timer */
+	set_vtimer(0x7fffffffffffffffULL);
 }
 
 static int __cpuinit s390_nohz_notify(struct notifier_block *self,
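
s390_get_idle_time() now reads the per-CPU idle timestamps with a sequence counter: the writer increments idle->sequence before and after an update (an odd value means an update is in flight), and the reader retries until it observes the same even value twice. A C11 sketch of both sides, simplified and not formally race-free (the kernel pairs the counter with smp_wmb() and ACCESS_ONCE()):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct idle_data {
	atomic_uint sequence;
	uint64_t idle_enter;
	uint64_t idle_exit;
};

/* Writer side: idle entry/exit accounting. */
void idle_update(struct idle_data *d, uint64_t enter, uint64_t leave)
{
	atomic_fetch_add(&d->sequence, 1);	/* odd: update in flight */
	d->idle_enter = enter;
	d->idle_exit = leave;
	atomic_fetch_add(&d->sequence, 1);	/* even: stable again */
}

/* Reader side, mirroring the retry loop in s390_get_idle_time(). */
uint64_t get_idle_time(struct idle_data *d, uint64_t now)
{
	unsigned int seq;
	uint64_t enter, leave;

	do {
		seq = atomic_load(&d->sequence);
		enter = d->idle_enter;
		leave = d->idle_exit;
	} while ((seq & 1) || atomic_load(&d->sequence) != seq);

	return enter ? ((leave ? leave : now) - enter) : 0;
}

int main(void)
{
	static struct idle_data d;

	idle_update(&d, 100, 250);
	printf("idle: %llu\n", (unsigned long long) get_idle_time(&d, 300));
	return 0;
}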

+ 3 - 3
arch/s390/kvm/interrupt.c

@@ -134,7 +134,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		if (rc == -EFAULT)
 			exception = 1;
 
-		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code);
+		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
 		if (rc == -EFAULT)
 			exception = 1;
 
@@ -156,7 +156,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		if (rc == -EFAULT)
 			exception = 1;
 
-		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->extcall.code);
+		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
 		if (rc == -EFAULT)
 			exception = 1;
 
@@ -202,7 +202,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 		if (rc == -EFAULT)
 			exception = 1;
 
-		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
+		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
 		if (rc == -EFAULT)
 			exception = 1;
 

+ 14 - 17
arch/s390/lib/delay.c

@@ -13,6 +13,7 @@
 #include <linux/irqflags.h>
 #include <linux/interrupt.h>
 #include <asm/div64.h>
+#include <asm/timer.h>
 
 void __delay(unsigned long loops)
 {
@@ -28,36 +29,33 @@ void __delay(unsigned long loops)
 
 static void __udelay_disabled(unsigned long long usecs)
 {
-	unsigned long mask, cr0, cr0_saved;
-	u64 clock_saved;
-	u64 end;
+	unsigned long cr0, cr6, new;
+	u64 clock_saved, end;
 
-	mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_WAIT |
-		PSW_MASK_EXT | PSW_MASK_MCHECK;
 	end = get_clock() + (usecs << 12);
 	clock_saved = local_tick_disable();
-	__ctl_store(cr0_saved, 0, 0);
-	cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
-	__ctl_load(cr0 , 0, 0);
+	__ctl_store(cr0, 0, 0);
+	__ctl_store(cr6, 6, 6);
+	new = (cr0 &  0xffff00e0) | 0x00000800;
+	__ctl_load(new , 0, 0);
+	new = 0;
+	__ctl_load(new, 6, 6);
 	lockdep_off();
 	do {
 		set_clock_comparator(end);
-		trace_hardirqs_on();
-		__load_psw_mask(mask);
+		vtime_stop_cpu();
 		local_irq_disable();
 	} while (get_clock() < end);
 	lockdep_on();
-	__ctl_load(cr0_saved, 0, 0);
+	__ctl_load(cr0, 0, 0);
+	__ctl_load(cr6, 6, 6);
 	local_tick_enable(clock_saved);
 }
 
 static void __udelay_enabled(unsigned long long usecs)
 {
-	unsigned long mask;
-	u64 clock_saved;
-	u64 end;
+	u64 clock_saved, end;
 
-	mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO;
 	end = get_clock() + (usecs << 12);
 	do {
 		clock_saved = 0;
@@ -65,8 +63,7 @@ static void __udelay_enabled(unsigned long long usecs)
 			clock_saved = local_tick_disable();
 			set_clock_comparator(end);
 		}
-		trace_hardirqs_on();
-		__load_psw_mask(mask);
+		vtime_stop_cpu();
 		local_irq_disable();
 		if (clock_saved)
 			local_tick_enable(clock_saved);
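
__udelay_disabled() now delays by repeatedly programming the clock comparator as a wakeup and going idle through vtime_stop_cpu() until the target TOD value is reached. The loop shape, sketched in user space with clock_gettime() for get_clock() and a short nanosleep() standing in for the idle wait:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t) ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Stand-in for vtime_stop_cpu(): sleep instead of loading a wait PSW. */
static void idle_until_interrupt(void)
{
	struct timespec ts = { 0, 100000 };	/* 100 us */

	nanosleep(&ts, NULL);
}

void udelay_ns(uint64_t nsecs)
{
	uint64_t end = now_ns() + nsecs;

	do {
		/* set_clock_comparator(end) would arm the wakeup here */
		idle_until_interrupt();
	} while (now_ns() < end);
}

int main(void)
{
	udelay_ns(1000000);	/* ~1 ms */
	puts("done");
	return 0;
}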

+ 8 - 22
arch/s390/lib/spinlock.c

@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/smp.h>
 #include <asm/io.h>
 
 int spin_retry = 1000;
@@ -24,21 +25,6 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void _raw_yield(void)
-{
-	if (MACHINE_HAS_DIAG44)
-		asm volatile("diag 0,0,0x44");
-}
-
-static inline void _raw_yield_cpu(int cpu)
-{
-	if (MACHINE_HAS_DIAG9C)
-		asm volatile("diag %0,0,0x9c"
-			     : : "d" (cpu_logical_map(cpu)));
-	else
-		_raw_yield();
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
@@ -60,7 +46,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		}
 		owner = lp->owner_cpu;
 		if (owner)
-			_raw_yield_cpu(~owner);
+			smp_yield_cpu(~owner);
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
@@ -91,7 +77,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		}
 		owner = lp->owner_cpu;
 		if (owner)
-			_raw_yield_cpu(~owner);
+			smp_yield_cpu(~owner);
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
@@ -121,7 +107,7 @@ void arch_spin_relax(arch_spinlock_t *lock)
 	if (cpu != 0) {
 		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
 		    !smp_vcpu_scheduled(~cpu))
-			_raw_yield_cpu(~cpu);
+			smp_yield_cpu(~cpu);
 	}
 }
 EXPORT_SYMBOL(arch_spin_relax);
@@ -133,7 +119,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_read_can_lock(rw))
@@ -153,7 +139,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 	local_irq_restore(flags);
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_read_can_lock(rw))
@@ -188,7 +174,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_write_can_lock(rw))
@@ -206,7 +192,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 	local_irq_restore(flags);
 	while (1) {
 		if (count-- <= 0) {
-			_raw_yield();
+			smp_yield();
 			count = spin_retry;
 		}
 		if (!arch_write_can_lock(rw))
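
All of these wait loops share one shape: spin a bounded number of times, then yield and reset the budget, with the rework routing the yield through smp_yield()/smp_yield_cpu() (diagnose 0x44/0x9c under z/VM). A C11 sketch of that bounded-spin-then-yield loop, with sched_yield() as the user-space stand-in:

#include <sched.h>
#include <stdatomic.h>

#define SPIN_RETRY 1000

void spin_lock_wait(atomic_int *lock)
{
	int count = SPIN_RETRY;
	int expected;

	for (;;) {
		if (count-- <= 0) {
			sched_yield();	/* smp_yield() in the kernel */
			count = SPIN_RETRY;
		}
		expected = 0;
		if (atomic_compare_exchange_weak(lock, &expected, 1))
			return;		/* lock acquired */
	}
}

int main(void)
{
	atomic_int lock = 0;

	spin_lock_wait(&lock);
	atomic_store(&lock, 0);	/* unlock */
	return 0;
}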

+ 2 - 2
arch/s390/mm/fault.c

@@ -532,7 +532,7 @@ void pfault_fini(void)
 static DEFINE_SPINLOCK(pfault_lock);
 static LIST_HEAD(pfault_list);
 
-static void pfault_interrupt(unsigned int ext_int_code,
+static void pfault_interrupt(struct ext_code ext_code,
 			     unsigned int param32, unsigned long param64)
 {
 	struct task_struct *tsk;
@@ -545,7 +545,7 @@ static void pfault_interrupt(unsigned int ext_int_code,
 	 * in the 'cpu address' field associated with the
          * external interrupt. 
 	 */
-	subcode = ext_int_code >> 16;
+	subcode = ext_code.subcode;
 	if ((subcode & 0xff00) != __SUBCODE_MASK)
 		return;
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;

+ 3 - 3
arch/s390/oprofile/hwsampler.c

@@ -233,8 +233,8 @@ static inline unsigned long *trailer_entry_ptr(unsigned long v)
 }
 
 /* prototypes for external interrupt handler and worker */
-static void hws_ext_handler(unsigned int ext_int_code,
-				unsigned int param32, unsigned long param64);
+static void hws_ext_handler(struct ext_code ext_code,
+			    unsigned int param32, unsigned long param64);
 
 static void worker(struct work_struct *work);
 
@@ -673,7 +673,7 @@ int hwsampler_activate(unsigned int cpu)
 	return rc;
 }
 
-static void hws_ext_handler(unsigned int ext_int_code,
+static void hws_ext_handler(struct ext_code ext_code,
 			    unsigned int param32, unsigned long param64)
 {
 	struct hws_cpu_buffer *cb;

+ 0 - 9
drivers/crypto/Kconfig

@@ -64,7 +64,6 @@ config CRYPTO_DEV_GEODE
 config ZCRYPT
 	tristate "Support for PCI-attached cryptographic adapters"
 	depends on S390
-	select ZCRYPT_MONOLITHIC if ZCRYPT="y"
 	select HW_RANDOM
 	help
 	  Select this option if you want to use a PCI-attached cryptographic
@@ -77,14 +76,6 @@ config ZCRYPT
 	  + Crypto Express3 Coprocessor (CEX3C)
 	  + Crypto Express3 Accelerator (CEX3A)
 
-config ZCRYPT_MONOLITHIC
-	bool "Monolithic zcrypt module"
-	depends on ZCRYPT
-	help
-	  Select this option if you want to have a single module z90crypt,
-	  that contains all parts of the crypto device driver (ap bus,
-	  request router and all the card drivers).
-
 config CRYPTO_SHA1_S390
 	tristate "SHA1 digest algorithm"
 	depends on S390

+ 4 - 0
drivers/s390/block/dasd.c

@@ -640,6 +640,10 @@ void dasd_enable_device(struct dasd_device *device)
 		dasd_set_target_state(device, DASD_STATE_NEW);
 	/* Now wait for the devices to come up. */
 	wait_event(dasd_init_waitq, _wait_for_device(device));
+
+	dasd_reload_device(device);
+	if (device->discipline->kick_validate)
+		device->discipline->kick_validate(device);
 }
 
 /*

+ 4 - 4
drivers/s390/block/dasd_diag.c

@@ -229,7 +229,7 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
 }
 
 /* Handle external interruption. */
-static void dasd_ext_handler(unsigned int ext_int_code,
+static void dasd_ext_handler(struct ext_code ext_code,
 			     unsigned int param32, unsigned long param64)
 {
 	struct dasd_ccw_req *cqr, *next;
@@ -239,7 +239,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
 	addr_t ip;
 	int rc;
 
-	switch (ext_int_code >> 24) {
+	switch (ext_code.subcode >> 8) {
 	case DASD_DIAG_CODE_31BIT:
 		ip = (addr_t) param32;
 		break;
@@ -280,7 +280,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
 	cqr->stopclk = get_clock();
 
 	expires = 0;
-	if ((ext_int_code & 0xff0000) == 0) {
+	if ((ext_code.subcode & 0xff) == 0) {
 		cqr->status = DASD_CQR_SUCCESS;
 		/* Start first request on queue if possible -> fast_io. */
 		if (!list_empty(&device->ccw_queue)) {
@@ -296,7 +296,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
 		cqr->status = DASD_CQR_QUEUED;
 		DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
 			      "request %p was %d (%d retries left)", cqr,
-			      (ext_int_code >> 16) & 0xff, cqr->retries);
+			      ext_code.subcode & 0xff, cqr->retries);
 		dasd_diag_erp(device);
 	}
 

+ 8 - 0
drivers/s390/block/dasd_eckd.c

@@ -1564,6 +1564,12 @@ static void dasd_eckd_do_validate_server(struct work_struct *work)
 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
 {
 	dasd_get_device(device);
+	/* exit if device not online or in offline processing */
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+	   device->state < DASD_STATE_ONLINE) {
+		dasd_put_device(device);
+		return;
+	}
 	/* queue call to do_validate_server to the kernel event daemon. */
 	schedule_work(&device->kick_validate);
 }
@@ -1993,6 +1999,7 @@ static int dasd_eckd_ready_to_online(struct dasd_device *device)
 static int dasd_eckd_online_to_ready(struct dasd_device *device)
 {
 	cancel_work_sync(&device->reload_device);
+	cancel_work_sync(&device->kick_validate);
 	return dasd_alias_remove_device(device);
 };
 
@@ -2263,6 +2270,7 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
 		 * and only if not suspended
 		 */
 		if (!device->block && private->lcu &&
+		    device->state == DASD_STATE_ONLINE &&
 		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
 		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
 			/*

+ 2 - 2
drivers/s390/char/sclp.c

@@ -393,7 +393,7 @@ __sclp_find_req(u32 sccb)
 /* Handler for external interruption. Perform request post-processing.
  * Prepare read event data request if necessary. Start processing of next
  * request on queue. */
-static void sclp_interrupt_handler(unsigned int ext_int_code,
+static void sclp_interrupt_handler(struct ext_code ext_code,
 				   unsigned int param32, unsigned long param64)
 {
 	struct sclp_req *req;
@@ -818,7 +818,7 @@ EXPORT_SYMBOL(sclp_reactivate);
 
 /* Handler for external interruption used during initialization. Modify
  * request state to done. */
-static void sclp_check_handler(unsigned int ext_int_code,
+static void sclp_check_handler(struct ext_code ext_code,
 			       unsigned int param32, unsigned long param64)
 {
 	u32 finished_sccb;

+ 0 - 1
drivers/s390/char/sclp_quiesce.c

@@ -15,7 +15,6 @@
 #include <linux/reboot.h>
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
-#include <asm/sigp.h>
 #include <asm/smp.h>
 
 #include "sclp.h"

+ 80 - 21
drivers/s390/char/sclp_sdias.c

@@ -8,6 +8,7 @@
 #define KMSG_COMPONENT "sclp_sdias"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/completion.h>
 #include <linux/sched.h>
 #include <asm/sclp.h>
 #include <asm/debug.h>
@@ -62,15 +63,29 @@ struct sdias_sccb {
 } __attribute__((packed));
 
 static struct sdias_sccb sccb __attribute__((aligned(4096)));
+static struct sdias_evbuf sdias_evbuf;
 
-static int sclp_req_done;
-static wait_queue_head_t sdias_wq;
+static DECLARE_COMPLETION(evbuf_accepted);
+static DECLARE_COMPLETION(evbuf_done);
 static DEFINE_MUTEX(sdias_mutex);
 
+/*
+ * Called by SCLP base when read event data has been completed (async mode only)
+ */
+static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf)
+{
+	memcpy(&sdias_evbuf, evbuf,
+	       min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length));
+	complete(&evbuf_done);
+	TRACE("sclp_sdias_receiver_fn done\n");
+}
+
+/*
+ * Called by SCLP base when sdias event has been accepted
+ */
 static void sdias_callback(struct sclp_req *request, void *data)
 {
-	sclp_req_done = 1;
-	wake_up(&sdias_wq); /* Inform caller, that request is complete */
+	complete(&evbuf_accepted);
 	TRACE("callback done\n");
 }
 
@@ -80,7 +95,6 @@ static int sdias_sclp_send(struct sclp_req *req)
 	int rc;
 
 	for (retries = SDIAS_RETRIES; retries; retries--) {
-		sclp_req_done = 0;
 		TRACE("add request\n");
 		rc = sclp_add_request(req);
 		if (rc) {
@@ -91,16 +105,31 @@ static int sdias_sclp_send(struct sclp_req *req)
 			continue;
 		}
 		/* initiated, wait for completion of service call */
-		wait_event(sdias_wq, (sclp_req_done == 1));
+		wait_for_completion(&evbuf_accepted);
 		if (req->status == SCLP_REQ_FAILED) {
 			TRACE("sclp request failed\n");
-			rc = -EIO;
 			continue;
 		}
+		/* if not accepted, retry */
+		if (!(sccb.evbuf.hdr.flags & 0x80)) {
+			TRACE("sclp request failed: flags=%x\n",
+			      sccb.evbuf.hdr.flags);
+			continue;
+		}
+		/*
+		 * for the sync interface the response is in the initial sccb
+		 */
+		if (!sclp_sdias_register.receiver_fn) {
+			memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
+			TRACE("sync request done\n");
+			return 0;
+		}
+		/* otherwise we wait for completion */
+		wait_for_completion(&evbuf_done);
 		TRACE("request done\n");
-		break;
+		return 0;
 	}
-	return rc;
+	return -EIO;
 }
 
 /*
@@ -140,13 +169,12 @@ int sclp_sdias_blk_count(void)
 		goto out;
 	}
 
-	switch (sccb.evbuf.event_status) {
+	switch (sdias_evbuf.event_status) {
 		case 0:
-			rc = sccb.evbuf.blk_cnt;
+			rc = sdias_evbuf.blk_cnt;
 			break;
 		default:
-			pr_err("SCLP error: %x\n",
-			       sccb.evbuf.event_status);
+			pr_err("SCLP error: %x\n", sdias_evbuf.event_status);
 			rc = -EIO;
 			goto out;
 	}
@@ -211,18 +239,18 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
 		goto out;
 	}
 
-	switch (sccb.evbuf.event_status) {
+	switch (sdias_evbuf.event_status) {
 		case EVSTATE_ALL_STORED:
 			TRACE("all stored\n");
 		case EVSTATE_PART_STORED:
-			TRACE("part stored: %i\n", sccb.evbuf.blk_cnt);
+			TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
 			break;
 		case EVSTATE_NO_DATA:
 			TRACE("no data\n");
 		default:
 			pr_err("Error from SCLP while copying hsa. "
 			       "Event status = %x\n",
-			       sccb.evbuf.event_status);
+			       sdias_evbuf.event_status);
 			rc = -EIO;
 	}
 out:
@@ -230,19 +258,50 @@ out:
 	return rc;
 }
 
-int __init sclp_sdias_init(void)
+static int __init sclp_sdias_register_check(void)
 {
 	int rc;
 
+	rc = sclp_register(&sclp_sdias_register);
+	if (rc)
+		return rc;
+	if (sclp_sdias_blk_count() == 0) {
+		sclp_unregister(&sclp_sdias_register);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int __init sclp_sdias_init_sync(void)
+{
+	TRACE("Try synchronous mode\n");
+	sclp_sdias_register.receive_mask = 0;
+	sclp_sdias_register.receiver_fn = NULL;
+	return sclp_sdias_register_check();
+}
+
+static int __init sclp_sdias_init_async(void)
+{
+	TRACE("Try asynchronous mode\n");
+	sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK;
+	sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn;
+	return sclp_sdias_register_check();
+}
+
+int __init sclp_sdias_init(void)
+{
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return 0;
 	sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
 	debug_register_view(sdias_dbf, &debug_sprintf_view);
 	debug_set_level(sdias_dbf, 6);
-	rc = sclp_register(&sclp_sdias_register);
-	if (rc)
-		return rc;
-	init_waitqueue_head(&sdias_wq);
+	if (sclp_sdias_init_sync() == 0)
+		goto out;
+	if (sclp_sdias_init_async() == 0)
+		goto out;
+	TRACE("init failed\n");
+	return -ENODEV;
+out:
 	TRACE("init done\n");
 	return 0;
 }
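
The rewritten init path probes the firmware interface in two steps: try the synchronous mode first and fall back to asynchronous event processing only if the synchronous registration (or its block-count sanity check) fails. A sketch of that fallback ladder, with try_sync()/try_async() standing in for sclp_sdias_init_sync()/sclp_sdias_init_async():

#include <stdio.h>

/* Stand-ins for the two register-and-check paths. */
static int try_sync(void)
{
	return -1;	/* pretend the sync interface is unsupported */
}

static int try_async(void)
{
	return 0;
}

int sdias_init(void)
{
	if (try_sync() == 0) {
		puts("using synchronous mode");
		return 0;
	}
	if (try_async() == 0) {
		puts("using asynchronous mode");
		return 0;
	}
	puts("init failed");
	return -1;
}

int main(void)
{
	return sdias_init();
}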

+ 0 - 1
drivers/s390/char/zcore.c

@@ -21,7 +21,6 @@
 #include <asm/ipl.h>
 #include <asm/sclp.h>
 #include <asm/setup.h>
-#include <asm/sigp.h>
 #include <asm/uaccess.h>
 #include <asm/debug.h>
 #include <asm/processor.h>

+ 0 - 2
drivers/s390/cio/cio.c

@@ -601,8 +601,6 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 	struct pt_regs *old_regs;
 
 	old_regs = set_irq_regs(regs);
-	s390_idle_check(regs, S390_lowcore.int_clock,
-			S390_lowcore.async_enter_timer);
 	irq_enter();
 	__this_cpu_write(s390_idle.nohz_delay, 1);
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)

+ 6 - 0
drivers/s390/cio/qdio_main.c

@@ -18,6 +18,7 @@
 #include <linux/atomic.h>
 #include <asm/debug.h>
 #include <asm/qdio.h>
+#include <asm/ipl.h>
 
 #include "cio.h"
 #include "css.h"
@@ -1093,6 +1094,11 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
 		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
 no_handler:
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+	/*
+	 * In case of z/VM LGR (Live Guest Migration), QDIO recovery will happen.
+	 * Therefore we call the LGR detection function here.
+	 */
+	lgr_info_log();
 }
 
 static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,

+ 0 - 10
drivers/s390/crypto/Makefile

@@ -2,16 +2,6 @@
 # S/390 crypto devices
 #
 
-ifdef CONFIG_ZCRYPT_MONOLITHIC
-
-z90crypt-objs := zcrypt_mono.o ap_bus.o zcrypt_api.o \
-		zcrypt_pcica.o zcrypt_pcicc.o zcrypt_pcixcc.o zcrypt_cex2a.o
-obj-$(CONFIG_ZCRYPT) += z90crypt.o
-
-else
-
 ap-objs := ap_bus.o
 obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
 obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o
-
-endif

+ 0 - 2
drivers/s390/crypto/ap_bus.c

@@ -1862,7 +1862,5 @@ void ap_module_exit(void)
 	}
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(ap_module_init);
 module_exit(ap_module_exit);
-#endif

+ 0 - 2
drivers/s390/crypto/zcrypt_api.c

@@ -1220,7 +1220,5 @@ void zcrypt_api_exit(void)
 	misc_deregister(&zcrypt_misc_device);
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_api_init);
 module_exit(zcrypt_api_exit);
-#endif

+ 0 - 4
drivers/s390/crypto/zcrypt_cex2a.c

@@ -63,13 +63,11 @@ static struct ap_device_id zcrypt_cex2a_ids[] = {
 	{ /* end of list */ },
 };
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
 		   "Copyright 2001, 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
-#endif
 
 static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
 static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
@@ -496,7 +494,5 @@ void __exit zcrypt_cex2a_exit(void)
 	ap_driver_unregister(&zcrypt_cex2a_driver);
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_cex2a_init);
 module_exit(zcrypt_cex2a_exit);
-#endif

+ 0 - 100
drivers/s390/crypto/zcrypt_mono.c

@@ -1,100 +0,0 @@
-/*
- *  linux/drivers/s390/crypto/zcrypt_mono.c
- *
- *  zcrypt 2.1.0
- *
- *  Copyright (C)  2001, 2006 IBM Corporation
- *  Author(s): Robert Burroughs
- *	       Eric Rossman (edrossma@us.ibm.com)
- *
- *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/compat.h>
-#include <linux/atomic.h>
-#include <asm/uaccess.h>
-
-#include "ap_bus.h"
-#include "zcrypt_api.h"
-#include "zcrypt_pcica.h"
-#include "zcrypt_pcicc.h"
-#include "zcrypt_pcixcc.h"
-#include "zcrypt_cex2a.h"
-
-/**
- * The module initialization code.
- */
-static int __init zcrypt_init(void)
-{
-	int rc;
-
-	rc = ap_module_init();
-	if (rc)
-		goto out;
-	rc = zcrypt_api_init();
-	if (rc)
-		goto out_ap;
-	rc = zcrypt_pcica_init();
-	if (rc)
-		goto out_api;
-	rc = zcrypt_pcicc_init();
-	if (rc)
-		goto out_pcica;
-	rc = zcrypt_pcixcc_init();
-	if (rc)
-		goto out_pcicc;
-	rc = zcrypt_cex2a_init();
-	if (rc)
-		goto out_pcixcc;
-	return 0;
-
-out_pcixcc:
-	zcrypt_pcixcc_exit();
-out_pcicc:
-	zcrypt_pcicc_exit();
-out_pcica:
-	zcrypt_pcica_exit();
-out_api:
-	zcrypt_api_exit();
-out_ap:
-	ap_module_exit();
-out:
-	return rc;
-}
-
-/**
- * The module termination code.
- */
-static void __exit zcrypt_exit(void)
-{
-	zcrypt_cex2a_exit();
-	zcrypt_pcixcc_exit();
-	zcrypt_pcicc_exit();
-	zcrypt_pcica_exit();
-	zcrypt_api_exit();
-	ap_module_exit();
-}
-
-module_init(zcrypt_init);
-module_exit(zcrypt_exit);

+ 0 - 4
drivers/s390/crypto/zcrypt_pcica.c

@@ -53,13 +53,11 @@ static struct ap_device_id zcrypt_pcica_ids[] = {
 	{ /* end of list */ },
 };
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
 		   "Copyright 2001, 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
-#endif
 
 static int zcrypt_pcica_probe(struct ap_device *ap_dev);
 static void zcrypt_pcica_remove(struct ap_device *ap_dev);
@@ -408,7 +406,5 @@ void zcrypt_pcica_exit(void)
 	ap_driver_unregister(&zcrypt_pcica_driver);
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_pcica_init);
 module_exit(zcrypt_pcica_exit);
-#endif

+ 0 - 4
drivers/s390/crypto/zcrypt_pcicc.c

@@ -65,13 +65,11 @@ static struct ap_device_id zcrypt_pcicc_ids[] = {
 	{ /* end of list */ },
 };
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
 		   "Copyright 2001, 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
-#endif
 
 static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
 static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
@@ -614,7 +612,5 @@ void zcrypt_pcicc_exit(void)
 	ap_driver_unregister(&zcrypt_pcicc_driver);
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_pcicc_init);
 module_exit(zcrypt_pcicc_exit);
-#endif

+ 0 - 4
drivers/s390/crypto/zcrypt_pcixcc.c

@@ -75,13 +75,11 @@ static struct ap_device_id zcrypt_pcixcc_ids[] = {
 	{ /* end of list */ },
 };
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
 		   "Copyright 2001, 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
-#endif
 
 static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
 static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
@@ -1121,7 +1119,5 @@ void zcrypt_pcixcc_exit(void)
 	ap_driver_unregister(&zcrypt_pcixcc_driver);
 }
 
-#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_pcixcc_init);
 module_exit(zcrypt_pcixcc_exit);
-#endif

+ 2 - 4
drivers/s390/kvm/kvm_virtio.c

@@ -380,15 +380,13 @@ static void hotplug_devices(struct work_struct *dummy)
 /*
  * we emulate the request_irq behaviour on top of s390 extints
  */
-static void kvm_extint_handler(unsigned int ext_int_code,
+static void kvm_extint_handler(struct ext_code ext_code,
 			       unsigned int param32, unsigned long param64)
 {
 	struct virtqueue *vq;
-	u16 subcode;
 	u32 param;
 
-	subcode = ext_int_code >> 16;
-	if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
+	if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
 		return;
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
 

+ 1 - 1
net/iucv/iucv.c

@@ -1800,7 +1800,7 @@ static void iucv_work_fn(struct work_struct *work)
  * Handles external interrupts coming in from CP.
  * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
  */
-static void iucv_external_interrupt(unsigned int ext_int_code,
+static void iucv_external_interrupt(struct ext_code ext_code,
 				    unsigned int param32, unsigned long param64)
 {
 	struct iucv_irq_data *p;

Some files were omitted because too many files changed in this diff