
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
 "This is the bulk of the s390 patches for the 3.11 merge window.

  Notable enhancements are: the block timeout patches for dasd from
  Hannes, and more work on the PCI support front.  In addition some
  cleanup and the usual bug fixing."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
  s390/dasd: Fail all requests when DASD_FLAG_ABORTIO is set
  s390/dasd: Add 'timeout' attribute
  block: check for timeout function in blk_rq_timed_out()
  block/dasd: detailed I/O errors
  s390/dasd: Reduce amount of messages for specific errors
  s390/dasd: Implement block timeout handling
  s390/dasd: process all requests in the device tasklet
  s390/dasd: make number of retries configurable
  s390/dasd: Clarify comment
  s390/hwsampler: Updated misleading member names in hws_data_entry
  s390/appldata_net_sum: do not use static data
  s390/appldata_mem: do not use static data
  s390/vmwatchdog: do not use static data
  s390/airq: simplify adapter interrupt code
  s390/pci: remove per device debug attribute
  s390/dma: remove gratuitous brackets
  s390/facility: decompose test_facility()
  s390/sclp: remove duplicated include from sclp_ctl.c
  s390/irq: store interrupt information in pt_regs
  s390/drivers: Cocci spatch "ptr_ret.spatch"
  ...
Linus Torvalds, 12 years ago (commit c1101cbc7d)
65 changed files with 1325 additions and 569 deletions
  1. Documentation/ioctl/ioctl-number.txt  +1 -0
  2. arch/s390/appldata/appldata_mem.c  +14 -4
  3. arch/s390/appldata/appldata_net_sum.c  +14 -4
  4. arch/s390/hypfs/hypfs_diag.c  +2 -6
  5. arch/s390/include/asm/airq.h  +12 -3
  6. arch/s390/include/asm/dma-mapping.h  +1 -1
  7. arch/s390/include/asm/facility.h  +11 -6
  8. arch/s390/include/asm/io.h  +0 -22
  9. arch/s390/include/asm/pci.h  +0 -2
  10. arch/s390/include/asm/pgalloc.h  +3 -0
  11. arch/s390/include/asm/ptrace.h  +1 -0
  12. arch/s390/include/uapi/asm/Kbuild  +1 -0
  13. arch/s390/include/uapi/asm/chsc.h  +13 -0
  14. arch/s390/include/uapi/asm/dasd.h  +4 -0
  15. arch/s390/include/uapi/asm/sclp_ctl.h  +24 -0
  16. arch/s390/kernel/asm-offsets.c  +1 -0
  17. arch/s390/kernel/entry.S  +10 -2
  18. arch/s390/kernel/entry.h  +1 -1
  19. arch/s390/kernel/entry64.S  +12 -4
  20. arch/s390/kernel/irq.c  +5 -3
  21. arch/s390/kernel/smp.c  +1 -4
  22. arch/s390/mm/pgtable.c  +48 -0
  23. arch/s390/oprofile/hwsampler.h  +2 -2
  24. arch/s390/pci/pci.c  +37 -46
  25. arch/s390/pci/pci_clp.c  +0 -1
  26. arch/s390/pci/pci_debug.c  +0 -29
  27. arch/s390/pci/pci_dma.c  +3 -3
  28. arch/s390/pci/pci_sysfs.c  +8 -12
  29. block/blk-core.c  +3 -0
  30. block/blk-timeout.c  +3 -2
  31. drivers/pci/hotplug/s390_pci_hpc.c  +44 -16
  32. drivers/pci/pci.c  +10 -0
  33. drivers/pci/probe.c  +1 -0
  34. drivers/s390/block/dasd.c  +104 -11
  35. drivers/s390/block/dasd_devmap.c  +97 -0
  36. drivers/s390/block/dasd_diag.c  +6 -2
  37. drivers/s390/block/dasd_eckd.c  +12 -5
  38. drivers/s390/block/dasd_erp.c  +8 -0
  39. drivers/s390/block/dasd_fba.c  +8 -2
  40. drivers/s390/block/dasd_int.h  +10 -0
  41. drivers/s390/block/dasd_ioctl.c  +59 -0
  42. drivers/s390/char/Makefile  +1 -1
  43. drivers/s390/char/sclp.c  +80 -6
  44. drivers/s390/char/sclp.h  +6 -1
  45. drivers/s390/char/sclp_cmd.c  +10 -10
  46. drivers/s390/char/sclp_con.c  +30 -1
  47. drivers/s390/char/sclp_ctl.c  +144 -0
  48. drivers/s390/char/sclp_vt220.c  +34 -5
  49. drivers/s390/char/tape_class.c  +1 -1
  50. drivers/s390/char/vmwatchdog.c  +3 -2
  51. drivers/s390/cio/airq.c  +56 -111
  52. drivers/s390/cio/chsc.c  +60 -0
  53. drivers/s390/cio/chsc.h  +38 -6
  54. drivers/s390/cio/chsc_sch.c  +150 -5
  55. drivers/s390/cio/cio.c  +28 -40
  56. drivers/s390/cio/qdio.h  +0 -34
  57. drivers/s390/cio/qdio_main.c  +0 -44
  58. drivers/s390/cio/qdio_setup.c  +19 -28
  59. drivers/s390/cio/qdio_thinint.c  +29 -50
  60. drivers/s390/crypto/ap_bus.c  +37 -27
  61. drivers/s390/net/claw.c  +1 -1
  62. drivers/s390/net/ctcm_main.c  +1 -1
  63. drivers/s390/net/lcs.c  +1 -1
  64. drivers/s390/net/qeth_core_main.c  +1 -1
  65. include/linux/pci.h  +1 -0

+ 1 - 0
Documentation/ioctl/ioctl-number.txt

@@ -72,6 +72,7 @@ Code  Seq#(hex)	Include File		Comments
 0x06	all	linux/lp.h
 0x09	all	linux/raid/md_u.h
 0x10	00-0F	drivers/char/s390/vmcp.h
+0x10	10-1F	arch/s390/include/uapi/sclp_ctl.h
 0x12	all	linux/fs.h
 		linux/blkpg.h
 0x1b	all	InfiniBand Subsystem	<http://infiniband.sourceforge.net/>

+ 14 - 4
arch/s390/appldata/appldata_mem.c

@@ -32,7 +32,7 @@
  * book:
  * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
  */
-static struct appldata_mem_data {
+struct appldata_mem_data {
 	u64 timestamp;
 	u32 sync_count_1;       /* after VM collected the record data, */
 	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
@@ -63,7 +63,7 @@ static struct appldata_mem_data {
 	u64 pgmajfault;		/* page faults (major only) */
 // <-- New in 2.6
 
-} __attribute__((packed)) appldata_mem_data;
+} __packed;
 
 
 /*
@@ -118,7 +118,6 @@ static struct appldata_ops ops = {
 	.record_nr = APPLDATA_RECORD_MEM_ID,
 	.size	   = sizeof(struct appldata_mem_data),
 	.callback  = &appldata_get_mem_data,
-	.data      = &appldata_mem_data,
 	.owner     = THIS_MODULE,
 	.mod_lvl   = {0xF0, 0xF0},		/* EBCDIC "00" */
 };
@@ -131,7 +130,17 @@ static struct appldata_ops ops = {
  */
 static int __init appldata_mem_init(void)
 {
-	return appldata_register_ops(&ops);
+	int ret;
+
+	ops.data = kzalloc(sizeof(struct appldata_mem_data), GFP_KERNEL);
+	if (!ops.data)
+		return -ENOMEM;
+
+	ret = appldata_register_ops(&ops);
+	if (ret)
+		kfree(ops.data);
+
+	return ret;
 }
 
 /*
@@ -142,6 +151,7 @@ static int __init appldata_mem_init(void)
 static void __exit appldata_mem_exit(void)
 {
 	appldata_unregister_ops(&ops);
+	kfree(ops.data);
 }
 
 

+ 14 - 4
arch/s390/appldata/appldata_net_sum.c

@@ -29,7 +29,7 @@
  * book:
  * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
  */
-static struct appldata_net_sum_data {
+struct appldata_net_sum_data {
 	u64 timestamp;
 	u32 sync_count_1;	/* after VM collected the record data, */
 	u32 sync_count_2;	/* sync_count_1 and sync_count_2 should be the
@@ -51,7 +51,7 @@ static struct appldata_net_sum_data {
 	u64 rx_dropped;		/* no space in linux buffers     */
 	u64 tx_dropped;		/* no space available in linux   */
 	u64 collisions;		/* collisions while transmitting */
-} __attribute__((packed)) appldata_net_sum_data;
+} __packed;
 
 
 /*
@@ -121,7 +121,6 @@ static struct appldata_ops ops = {
 	.record_nr = APPLDATA_RECORD_NET_SUM_ID,
 	.size	   = sizeof(struct appldata_net_sum_data),
 	.callback  = &appldata_get_net_sum_data,
-	.data      = &appldata_net_sum_data,
 	.owner     = THIS_MODULE,
 	.mod_lvl   = {0xF0, 0xF0},		/* EBCDIC "00" */
 };
@@ -134,7 +133,17 @@ static struct appldata_ops ops = {
  */
 static int __init appldata_net_init(void)
 {
-	return appldata_register_ops(&ops);
+	int ret;
+
+	ops.data = kzalloc(sizeof(struct appldata_net_sum_data), GFP_KERNEL);
+	if (!ops.data)
+		return -ENOMEM;
+
+	ret = appldata_register_ops(&ops);
+	if (ret)
+		kfree(ops.data);
+
+	return ret;
 }
 
 /*
@@ -145,6 +154,7 @@ static int __init appldata_net_init(void)
 static void __exit appldata_net_exit(void)
 {
 	appldata_unregister_ops(&ops);
+	kfree(ops.data);
 }
 
 

+ 2 - 6
arch/s390/hypfs/hypfs_diag.c

@@ -651,9 +651,7 @@ static int hypfs_create_cpu_files(struct super_block *sb,
 	}
 	diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
 	rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
-	if (IS_ERR(rc))
-		return PTR_ERR(rc);
-	return 0;
+	return PTR_RET(rc);
 }
 
 static void *hypfs_create_lpar_files(struct super_block *sb,
@@ -702,9 +700,7 @@ static int hypfs_create_phys_cpu_files(struct super_block *sb,
 		return PTR_ERR(rc);
 	diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
 	rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
-	if (IS_ERR(rc))
-		return PTR_ERR(rc);
-	return 0;
+	return PTR_RET(rc);
 }
 
 static void *hypfs_create_phys_files(struct super_block *sb,

+ 12 - 3
arch/s390/include/asm/airq.h

@@ -9,9 +9,18 @@
 #ifndef _ASM_S390_AIRQ_H
 #define _ASM_S390_AIRQ_H
 
-typedef void (*adapter_int_handler_t)(void *, void *);
+struct airq_struct {
+	struct hlist_node list;		/* Handler queueing. */
+	void (*handler)(struct airq_struct *);	/* Thin-interrupt handler */
+	u8 *lsi_ptr;			/* Local-Summary-Indicator pointer */
+	u8 lsi_mask;			/* Local-Summary-Indicator mask */
+	u8 isc;				/* Interrupt-subclass */
+	u8 flags;
+};
 
-void *s390_register_adapter_interrupt(adapter_int_handler_t, void *, u8);
-void s390_unregister_adapter_interrupt(void *, u8);
+#define AIRQ_PTR_ALLOCATED	0x01
+
+int register_adapter_interrupt(struct airq_struct *airq);
+void unregister_adapter_interrupt(struct airq_struct *airq);
 
 #endif /* _ASM_S390_AIRQ_H */
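For orientation (not part of the patch): with the typedef-based interface gone, a driver now embeds an airq_struct, fills in handler and isc, and registers it, much like the zpci conversion further down in this merge. A minimal sketch, with all my_* names invented for illustration:

/* Illustrative driver-side use of the reworked adapter interrupt API. */
#include <linux/module.h>
#include <asm/airq.h>
#include <asm/isc.h>

static void my_irq_handler(struct airq_struct *airq)
{
	/* scan the adapter's interrupt indicator bits, kick the bottom half */
}

static struct airq_struct my_airq = {
	.handler = my_irq_handler,
	.isc	 = PCI_ISC,	/* interrupt subclass; PCI_ISC only as an example */
};

static int __init my_init(void)
{
	int rc;

	rc = register_adapter_interrupt(&my_airq);
	if (rc)
		return rc;
	/* lsi_ptr now points at the local summary indicator byte */
	*my_airq.lsi_ptr = 1;
	return 0;
}

static void __exit my_exit(void)
{
	unregister_adapter_interrupt(&my_airq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");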

+ 1 - 1
arch/s390/include/asm/dma-mapping.h

@@ -53,7 +53,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	debug_dma_mapping_error(dev, dma_addr);
 	if (dma_ops->mapping_error)
 		return dma_ops->mapping_error(dev, dma_addr);
-	return (dma_addr == DMA_ERROR_CODE);
+	return dma_addr == DMA_ERROR_CODE;
 }
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,

+ 11 - 6
arch/s390/include/asm/facility.h

@@ -13,6 +13,16 @@
 
 #define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */
 
+static inline int __test_facility(unsigned long nr, void *facilities)
+{
+	unsigned char *ptr;
+
+	if (nr >= MAX_FACILITY_BIT)
+		return 0;
+	ptr = (unsigned char *) facilities + (nr >> 3);
+	return (*ptr & (0x80 >> (nr & 7))) != 0;
+}
+
 /*
  * The test_facility function uses the bit odering where the MSB is bit 0.
  * That makes it easier to query facility bits with the bit number as
@@ -20,12 +30,7 @@
  */
 static inline int test_facility(unsigned long nr)
 {
-	unsigned char *ptr;
-
-	if (nr >= MAX_FACILITY_BIT)
-		return 0;
-	ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
-	return (*ptr & (0x80 >> (nr & 7))) != 0;
+	return __test_facility(nr, &S390_lowcore.stfle_fac_list);
 }
 
 /**
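Decomposing test_facility() lets callers check bits in an arbitrary, caller-supplied facility list (for example one fetched with stfle into a local buffer) instead of only the cached lowcore copy. A rough sketch, with the bit number chosen arbitrarily:

/* Sketch: test a facility bit in the lowcore list or in a local list. */
#include <linux/kernel.h>
#include <asm/facility.h>

static void facility_example(void)
{
	u64 fac_list[4] = { 0 };	/* e.g. filled by a prior stfle */

	if (test_facility(50))		/* bit 50 used purely as an example */
		pr_info("facility bit 50 set in the lowcore list\n");

	if (__test_facility(50, &fac_list))
		pr_info("facility bit 50 set in the local list\n");
}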

+ 0 - 22
arch/s390/include/asm/io.h

@@ -13,28 +13,6 @@
 #include <asm/page.h>
 #include <asm/pci_io.h>
 
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are pretty trivial
- */
-static inline unsigned long virt_to_phys(volatile void * address)
-{
-	unsigned long real_address;
-	asm volatile(
-		 "	lra	%0,0(%1)\n"
-		 "	jz	0f\n"
-		 "	la	%0,0\n"
-		 "0:"
-		 : "=a" (real_address) : "a" (address) : "cc");
-	return real_address;
-}
-#define virt_to_phys virt_to_phys
-
-static inline void * phys_to_virt(unsigned long address)
-{
-	return (void *) address;
-}
-
 void *xlate_dev_mem_ptr(unsigned long phys);
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
 void unxlate_dev_mem_ptr(unsigned long phys, void *addr);

+ 0 - 2
arch/s390/include/asm/pci.h

@@ -120,7 +120,6 @@ struct zpci_dev {
 
 	struct dentry	*debugfs_dev;
 	struct dentry	*debugfs_perf;
-	struct dentry	*debugfs_debug;
 };
 
 struct pci_hp_callback_ops {
@@ -143,7 +142,6 @@ int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
 void zpci_stop_device(struct zpci_dev *);
 void zpci_free_device(struct zpci_dev *);
-int zpci_scan_device(struct zpci_dev *);
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);
 

+ 3 - 0
arch/s390/include/asm/pgalloc.h

@@ -22,6 +22,9 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
 
+int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+			  unsigned long key, bool nq);
+
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
 	typedef struct { char _[n]; } addrtype;

+ 1 - 0
arch/s390/include/asm/ptrace.h

@@ -24,6 +24,7 @@ struct pt_regs
 	unsigned long gprs[NUM_GPRS];
 	unsigned long orig_gpr2;
 	unsigned int int_code;
+	unsigned int int_parm;
 	unsigned long int_parm_long;
 };
 

+ 1 - 0
arch/s390/include/uapi/asm/Kbuild

@@ -35,6 +35,7 @@ header-y += siginfo.h
 header-y += signal.h
 header-y += socket.h
 header-y += sockios.h
+header-y += sclp_ctl.h
 header-y += stat.h
 header-y += statfs.h
 header-y += swab.h

+ 13 - 0
arch/s390/include/uapi/asm/chsc.h

@@ -29,6 +29,16 @@ struct chsc_async_area {
 	__u8 data[CHSC_SIZE - sizeof(struct chsc_async_header)];
 } __attribute__ ((packed));
 
+struct chsc_header {
+	__u16 length;
+	__u16 code;
+} __attribute__ ((packed));
+
+struct chsc_sync_area {
+	struct chsc_header header;
+	__u8 data[CHSC_SIZE - sizeof(struct chsc_header)];
+} __attribute__ ((packed));
+
 struct chsc_response_struct {
 	__u16 length;
 	__u16 code;
@@ -126,5 +136,8 @@ struct chsc_cpd_info {
 #define CHSC_INFO_CCL _IOWR(CHSC_IOCTL_MAGIC, 0x86, struct chsc_comp_list)
 #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info)
 #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal)
+#define CHSC_START_SYNC _IOWR(CHSC_IOCTL_MAGIC, 0x89, struct chsc_sync_area)
+#define CHSC_ON_CLOSE_SET _IOWR(CHSC_IOCTL_MAGIC, 0x8a, struct chsc_async_area)
+#define CHSC_ON_CLOSE_REMOVE _IO(CHSC_IOCTL_MAGIC, 0x8b)
 
 #endif
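For illustration only: the new CHSC_START_SYNC ioctl would be driven from user space roughly as below. The device node name /dev/chsc is an assumption based on the misc device registered by chsc_sch.c, and the command payload is left as a placeholder:

/* Hypothetical sketch: issue a synchronous CHSC through the new ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/chsc.h>

int main(void)
{
	struct chsc_sync_area *area;
	int fd;

	area = calloc(1, sizeof(*area));	/* one CHSC_SIZE block, zeroed */
	if (!area)
		return 1;
	/* ... fill area->header.length / .code and the command payload ... */

	fd = open("/dev/chsc", O_RDWR);		/* node name is an assumption */
	if (fd < 0)
		return 1;
	if (ioctl(fd, CHSC_START_SYNC, area) < 0)
		perror("CHSC_START_SYNC");
	close(fd);
	free(area);
	return 0;
}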

+ 4 - 0
arch/s390/include/uapi/asm/dasd.h

@@ -261,6 +261,10 @@ struct dasd_snid_ioctl_data {
 #define BIODASDQUIESCE _IO(DASD_IOCTL_LETTER,6) 
 /* Resume IO on device */
 #define BIODASDRESUME  _IO(DASD_IOCTL_LETTER,7) 
+/* Abort all I/O on a device */
+#define BIODASDABORTIO _IO(DASD_IOCTL_LETTER, 240)
+/* Allow I/O on a device */
+#define BIODASDALLOWIO _IO(DASD_IOCTL_LETTER, 241)
 
 
 /* retrieve API version number */
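As a hedged usage sketch (not part of this patch), the new abort/allow ioctls could be issued against a DASD block device as shown below; the device node /dev/dasda is only an example:

/* Sketch: abort all failfast I/O on a DASD, then re-allow it. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>

int main(void)
{
	int fd = open("/dev/dasda", O_RDONLY);	/* device node is an example */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BIODASDABORTIO) < 0)	/* requires CAP_SYS_ADMIN */
		perror("BIODASDABORTIO");
	/* ... recover or fence the device here ... */
	if (ioctl(fd, BIODASDALLOWIO) < 0)
		perror("BIODASDALLOWIO");
	close(fd);
	return 0;
}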

+ 24 - 0
arch/s390/include/uapi/asm/sclp_ctl.h

@@ -0,0 +1,24 @@
+/*
+ * IOCTL interface for SCLP
+ *
+ * Copyright IBM Corp. 2012
+ *
+ * Author: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_SCLP_CTL_H
+#define _ASM_SCLP_CTL_H
+
+#include <linux/types.h>
+
+struct sclp_ctl_sccb {
+	__u32	cmdw;
+	__u64	sccb;
+} __attribute__((packed));
+
+#define SCLP_CTL_IOCTL_MAGIC 0x10
+
+#define SCLP_CTL_SCCB \
+	_IOWR(SCLP_CTL_IOCTL_MAGIC, 0x10, struct sclp_ctl_sccb)
+
+#endif
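A hypothetical user-space sketch of the new SCLP_CTL_SCCB ioctl follows. The device node /dev/sclp is assumed from the sclp_ctl.c driver added in this series, the command word is a placeholder, and the SCCB payload setup is elided:

/* Sketch: hand a caller-built SCCB to the SCLP via the new ioctl. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/sclp_ctl.h>

int main(void)
{
	struct sclp_ctl_sccb ctl;
	void *sccb;
	int fd;

	/* SCCBs fit in one 4 KiB page; allocate page-aligned memory. */
	if (posix_memalign(&sccb, 4096, 4096))
		return 1;
	/* ... fill in the SCCB header and command-specific payload here ... */

	ctl.cmdw = 0x00020001;			/* example command word (assumption) */
	ctl.sccb = (uint64_t)(unsigned long)sccb;

	fd = open("/dev/sclp", O_RDWR);		/* node name is an assumption */
	if (fd < 0)
		return 1;
	if (ioctl(fd, SCLP_CTL_SCCB, &ctl) < 0)
		perror("SCLP_CTL_SCCB");
	close(fd);
	free(sccb);
	return 0;
}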

+ 1 - 0
arch/s390/kernel/asm-offsets.c

@@ -47,6 +47,7 @@ int main(void)
 	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
 	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
 	DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
+	DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm));
 	DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
 	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 	BLANK();

+ 10 - 2
arch/s390/kernel/entry.S

@@ -429,11 +429,19 @@ io_skip:
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
 	stm	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+io_loop:
 	l	%r1,BASED(.Ldo_IRQ)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call do_IRQ
+	tm	__LC_MACHINE_FLAGS+2,0x10	# MACHINE_FLAG_LPAR
+	jz	io_return
+	tpi	0
+	jz	io_return
+	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+	j	io_loop
 io_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
@@ -573,10 +581,10 @@ ext_skip:
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
 	stm	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
+	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 	TRACE_IRQS_OFF
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
-	l	%r4,__LC_EXT_PARAMS	# get external parameters
 	l	%r1,BASED(.Ldo_extint)
 	basr	%r14,%r1		# call do_extint
 	j	io_return

+ 1 - 1
arch/s390/kernel/entry.h

@@ -54,7 +54,7 @@ void handle_signal32(unsigned long sig, struct k_sigaction *ka,
 void do_notify_resume(struct pt_regs *regs);
 
 struct ext_code;
-void do_extint(struct pt_regs *regs, struct ext_code, unsigned int, unsigned long);
+void do_extint(struct pt_regs *regs);
 void do_restart(void);
 void __init startup_init(void);
 void die(struct pt_regs *regs, const char *str);

+ 12 - 4
arch/s390/kernel/entry64.S

@@ -460,10 +460,18 @@ io_skip:
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+io_loop:
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	brasl	%r14,do_IRQ
+	tm	__LC_MACHINE_FLAGS+6,0x10	# MACHINE_FLAG_LPAR
+	jz	io_return
+	tpi	0
+	jz	io_return
+	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+	j	io_loop
 io_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
@@ -605,13 +613,13 @@ ext_skip:
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
+	lghi	%r1,__LC_EXT_PARAMS2
+	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
+	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
+	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-	lghi	%r1,4096
 	lgr	%r2,%r11		# pass pointer to pt_regs
-	llgf	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
-	llgf	%r4,__LC_EXT_PARAMS	# get external parameter
-	lg	%r5,__LC_EXT_PARAMS2-4096(%r1)	# get 64 bit external parameter
 	brasl	%r14,do_extint
 	j	io_return
 

+ 5 - 3
arch/s390/kernel/irq.c

@@ -234,9 +234,9 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
 }
 EXPORT_SYMBOL(unregister_external_interrupt);
 
-void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
-			   unsigned int param32, unsigned long param64)
+void __irq_entry do_extint(struct pt_regs *regs)
 {
+	struct ext_code ext_code;
 	struct pt_regs *old_regs;
 	struct ext_int_info *p;
 	int index;
@@ -248,6 +248,7 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
 		clock_comparator_work();
 	}
 	kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
+	ext_code = *(struct ext_code *) &regs->int_code;
 	if (ext_code.code != 0x1004)
 		__get_cpu_var(s390_idle).nohz_delay = 1;
 
@@ -255,7 +256,8 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
 	rcu_read_lock();
 	list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
 		if (likely(p->code == ext_code.code))
-			p->handler(ext_code, param32, param64);
+			p->handler(ext_code, regs->int_parm,
+				   regs->int_parm_long);
 	rcu_read_unlock();
 	irq_exit();
 	set_irq_regs(old_regs);

+ 1 - 4
arch/s390/kernel/smp.c

@@ -49,7 +49,6 @@
 
 enum {
 	ec_schedule = 0,
-	ec_call_function,
 	ec_call_function_single,
 	ec_stop_cpu,
 };
@@ -438,8 +437,6 @@ static void smp_handle_ext_call(void)
 		smp_stop_cpu();
 	if (test_bit(ec_schedule, &bits))
 		scheduler_ipi();
-	if (test_bit(ec_call_function, &bits))
-		generic_smp_call_function_interrupt();
 	if (test_bit(ec_call_function_single, &bits))
 		generic_smp_call_function_single_interrupt();
 }
@@ -456,7 +453,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 	int cpu;
 
 	for_each_cpu(cpu, mask)
-		pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
+		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
 }
 
 void arch_send_call_function_single_ipi(int cpu)

+ 48 - 0
arch/s390/mm/pgtable.c

@@ -771,6 +771,54 @@ static inline void page_table_free_pgste(unsigned long *table)
 	__free_page(page);
 }
 
+int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+			  unsigned long key, bool nq)
+{
+	spinlock_t *ptl;
+	pgste_t old, new;
+	pte_t *ptep;
+
+	down_read(&mm->mmap_sem);
+	ptep = get_locked_pte(current->mm, addr, &ptl);
+	if (unlikely(!ptep)) {
+		up_read(&mm->mmap_sem);
+		return -EFAULT;
+	}
+
+	new = old = pgste_get_lock(ptep);
+	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
+			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
+	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
+	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
+	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+		unsigned long address, bits;
+		unsigned char skey;
+
+		address = pte_val(*ptep) & PAGE_MASK;
+		skey = page_get_storage_key(address);
+		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+		/* Set storage key ACC and FP */
+		page_set_storage_key(address,
+				(key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)),
+				!nq);
+
+		/* Merge host changed & referenced into pgste  */
+		pgste_val(new) |= bits << 52;
+		/* Transfer skey changed & referenced bit to kvm user bits */
+		pgste_val(new) |= bits << 45;	/* PGSTE_UR_BIT & PGSTE_UC_BIT */
+	}
+	/* changing the guest storage key is considered a change of the page */
+	if ((pgste_val(new) ^ pgste_val(old)) &
+	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
+		pgste_val(new) |= PGSTE_UC_BIT;
+
+	pgste_set_unlock(ptep, new);
+	pte_unmap_unlock(*ptep, ptl);
+	up_read(&mm->mmap_sem);
+	return 0;
+}
+EXPORT_SYMBOL(set_guest_storage_key);
+
 #else /* CONFIG_PGSTE */
 
 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,

+ 2 - 2
arch/s390/oprofile/hwsampler.h

@@ -81,8 +81,8 @@ struct hws_data_entry {
 	unsigned int:16;
 	unsigned int prim_asn:16;   /* primary ASN                       */
 	unsigned long long ia;      /* Instruction Address               */
-	unsigned long long lpp;     /* Logical-Partition Program Param.  */
-	unsigned long long vpp;     /* Virtual-Machine Program Param.    */
+	unsigned long long gpp;     /* Guest Program Parameter		 */
+	unsigned long long hpp;     /* Host Program Parameter		 */
 };
 
 struct hws_trailer_entry {

+ 37 - 46
arch/s390/pci/pci.c

@@ -82,10 +82,13 @@ struct intr_bucket {
 
 static struct intr_bucket *bucket;
 
-/* Adapter local summary indicator */
-static u8 *zpci_irq_si;
+/* Adapter interrupt definitions */
+static void zpci_irq_handler(struct airq_struct *airq);
 
-static atomic_t irq_retries = ATOMIC_INIT(0);
+static struct airq_struct zpci_airq = {
+	.handler = zpci_irq_handler,
+	.isc = PCI_ISC,
+};
 
 /* I/O Map */
 static DEFINE_SPINLOCK(zpci_iomap_lock);
@@ -404,7 +407,7 @@ static struct pci_ops pci_root_ops = {
 /* store the last handled bit to implement fair scheduling of devices */
 static DEFINE_PER_CPU(unsigned long, next_sbit);
 
-static void zpci_irq_handler(void *dont, void *need)
+static void zpci_irq_handler(struct airq_struct *airq)
 {
 	unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
 	int rescan = 0, max = aisb_max;
@@ -452,7 +455,6 @@ scan:
 	max = aisb_max;
 	sbit = find_first_bit_left(bucket->aisb, max);
 	if (sbit != max) {
-		atomic_inc(&irq_retries);
 		rescan++;
 		goto scan;
 	}
@@ -565,7 +567,21 @@ static void zpci_map_resources(struct zpci_dev *zdev)
 		pr_debug("BAR%i: -> start: %Lx  end: %Lx\n",
 			i, pdev->resource[i].start, pdev->resource[i].end);
 	}
-};
+}
+
+static void zpci_unmap_resources(struct zpci_dev *zdev)
+{
+	struct pci_dev *pdev = zdev->pdev;
+	resource_size_t len;
+	int i;
+
+	for (i = 0; i < PCI_BAR_COUNT; i++) {
+		len = pci_resource_len(pdev, i);
+		if (!len)
+			continue;
+		pci_iounmap(pdev, (void *) pdev->resource[i].start);
+	}
+}
 
 struct zpci_dev *zpci_alloc_device(void)
 {
@@ -701,25 +717,20 @@ static int __init zpci_irq_init(void)
 		goto out_alloc;
 	}
 
-	isc_register(PCI_ISC);
-	zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
-	if (IS_ERR(zpci_irq_si)) {
-		rc = PTR_ERR(zpci_irq_si);
-		zpci_irq_si = NULL;
+	rc = register_adapter_interrupt(&zpci_airq);
+	if (rc)
 		goto out_ai;
-	}
+	/* Set summary to 1 to be called every time for the ISC. */
+	*zpci_airq.lsi_ptr = 1;
 
 	for_each_online_cpu(cpu)
 		per_cpu(next_sbit, cpu) = 0;
 
 	spin_lock_init(&bucket->lock);
-	/* set summary to 1 to be called every time for the ISC */
-	*zpci_irq_si = 1;
 	set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
 	return 0;
 
 out_ai:
-	isc_unregister(PCI_ISC);
 	free_page((unsigned long) bucket->alloc);
 out_alloc:
 	free_page((unsigned long) bucket->aisb);
@@ -732,21 +743,10 @@ static void zpci_irq_exit(void)
 {
 	free_page((unsigned long) bucket->alloc);
 	free_page((unsigned long) bucket->aisb);
-	s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
-	isc_unregister(PCI_ISC);
+	unregister_adapter_interrupt(&zpci_airq);
 	kfree(bucket);
 }
 
-void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m)
-{
-	if (!zdev)
-		return;
-
-	seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries));
-	seq_printf(m, "aibv[0]:%016lx  aibv[1]:%016lx  aisb:%016lx\n",
-		   get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb);
-}
-
 static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
 						unsigned long flags, int domain)
 {
@@ -810,6 +810,16 @@ int pcibios_add_device(struct pci_dev *pdev)
 	return 0;
 }
 
+void pcibios_release_device(struct pci_dev *pdev)
+{
+	struct zpci_dev *zdev = get_zdev(pdev);
+
+	zpci_unmap_resources(zdev);
+	zpci_fmb_disable_device(zdev);
+	zpci_debug_exit_device(zdev);
+	zdev->pdev = NULL;
+}
+
 static int zpci_scan_bus(struct zpci_dev *zdev)
 {
 	struct resource *res;
@@ -950,25 +960,6 @@ void zpci_stop_device(struct zpci_dev *zdev)
 }
 EXPORT_SYMBOL_GPL(zpci_stop_device);
 
-int zpci_scan_device(struct zpci_dev *zdev)
-{
-	zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
-	if (!zdev->pdev) {
-		pr_err("pci_scan_single_device failed for fid: 0x%x\n",
-			zdev->fid);
-		goto out;
-	}
-
-	pci_bus_add_devices(zdev->bus);
-
-	return 0;
-out:
-	zpci_dma_exit_device(zdev);
-	clp_disable_fh(zdev);
-	return -EIO;
-}
-EXPORT_SYMBOL_GPL(zpci_scan_device);
-
 static inline int barsize(u8 size)
 {
 	return (size) ? (1 << size) >> 10 : 0;

+ 0 - 1
arch/s390/pci/pci_clp.c

@@ -236,7 +236,6 @@ int clp_disable_fh(struct zpci_dev *zdev)
 	if (!zdev_enabled(zdev))
 		return 0;
 
-	dev_info(&zdev->pdev->dev, "disabling fn handle: 0x%x\n", fh);
 	rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
 	if (!rc)
 		/* Success -> store disabled handle in zdev */

+ 0 - 29
arch/s390/pci/pci_debug.c

@@ -115,27 +115,6 @@ static const struct file_operations debugfs_pci_perf_fops = {
 	.release = single_release,
 };
 
-static int pci_debug_show(struct seq_file *m, void *v)
-{
-	struct zpci_dev *zdev = m->private;
-
-	zpci_debug_info(zdev, m);
-	return 0;
-}
-
-static int pci_debug_seq_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, pci_debug_show,
-			   file_inode(filp)->i_private);
-}
-
-static const struct file_operations debugfs_pci_debug_fops = {
-	.open	 = pci_debug_seq_open,
-	.read	 = seq_read,
-	.llseek  = seq_lseek,
-	.release = single_release,
-};
-
 void zpci_debug_init_device(struct zpci_dev *zdev)
 {
 	zdev->debugfs_dev = debugfs_create_dir(dev_name(&zdev->pdev->dev),
@@ -149,19 +128,11 @@ void zpci_debug_init_device(struct zpci_dev *zdev)
 				&debugfs_pci_perf_fops);
 	if (IS_ERR(zdev->debugfs_perf))
 		zdev->debugfs_perf = NULL;
-
-	zdev->debugfs_debug = debugfs_create_file("debug",
-				S_IFREG | S_IRUGO | S_IWUSR,
-				zdev->debugfs_dev, zdev,
-				&debugfs_pci_debug_fops);
-	if (IS_ERR(zdev->debugfs_debug))
-		zdev->debugfs_debug = NULL;
 }
 
 void zpci_debug_exit_device(struct zpci_dev *zdev)
 {
 	debugfs_remove(zdev->debugfs_perf);
-	debugfs_remove(zdev->debugfs_debug);
 	debugfs_remove(zdev->debugfs_dev);
 }
 

+ 3 - 3
arch/s390/pci/pci_dma.c

@@ -263,7 +263,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 				     enum dma_data_direction direction,
 				     struct dma_attrs *attrs)
 {
-	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
 	unsigned long nr_pages, iommu_page_index;
 	unsigned long pa = page_to_phys(page) + offset;
 	int flags = ZPCI_PTE_VALID;
@@ -304,7 +304,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 				 size_t size, enum dma_data_direction direction,
 				 struct dma_attrs *attrs)
 {
-	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
 	unsigned long iommu_page_index;
 	int npages;
 
@@ -323,7 +323,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
 			    dma_addr_t *dma_handle, gfp_t flag,
 			    struct dma_attrs *attrs)
 {
-	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
 	struct page *page;
 	unsigned long pa;
 	dma_addr_t map;

+ 8 - 12
arch/s390/pci/pci_sysfs.c

@@ -15,40 +15,36 @@
 static ssize_t show_fid(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
-	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
 
-	sprintf(buf, "0x%08x\n", zdev->fid);
-	return strlen(buf);
+	return sprintf(buf, "0x%08x\n", zdev->fid);
 }
 static DEVICE_ATTR(function_id, S_IRUGO, show_fid, NULL);
 
 static ssize_t show_fh(struct device *dev, struct device_attribute *attr,
 		       char *buf)
 {
-	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
 
-	sprintf(buf, "0x%08x\n", zdev->fh);
-	return strlen(buf);
+	return sprintf(buf, "0x%08x\n", zdev->fh);
 }
 static DEVICE_ATTR(function_handle, S_IRUGO, show_fh, NULL);
 
 static ssize_t show_pchid(struct device *dev, struct device_attribute *attr,
 			  char *buf)
 {
-	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
 
-	sprintf(buf, "0x%04x\n", zdev->pchid);
-	return strlen(buf);
+	return sprintf(buf, "0x%04x\n", zdev->pchid);
 }
 static DEVICE_ATTR(pchid, S_IRUGO, show_pchid, NULL);
 
 static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr,
 			  char *buf)
 {
-	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
+	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
 
-	sprintf(buf, "0x%02x\n", zdev->pfgid);
-	return strlen(buf);
+	return sprintf(buf, "0x%02x\n", zdev->pfgid);
 }
 static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL);
 

+ 3 - 0
block/blk-core.c

@@ -2315,6 +2315,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		case -EBADE:
 			error_type = "critical nexus";
 			break;
+		case -ETIMEDOUT:
+			error_type = "timeout";
+			break;
 		case -EIO:
 		default:
 			error_type = "I/O";

+ 3 - 2
block/blk-timeout.c

@@ -82,9 +82,10 @@ void blk_delete_timer(struct request *req)
 static void blk_rq_timed_out(struct request *req)
 {
 	struct request_queue *q = req->q;
-	enum blk_eh_timer_return ret;
+	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
 
-	ret = q->rq_timed_out_fn(req);
+	if (q->rq_timed_out_fn)
+		ret = q->rq_timed_out_fn(req);
 	switch (ret) {
 	case BLK_EH_HANDLED:
 		__blk_complete_request(req);

+ 44 - 16
drivers/pci/hotplug/s390_pci_hpc.c

@@ -41,6 +41,28 @@ struct slot {
 	struct zpci_dev *zdev;
 };
 
+static inline int slot_configure(struct slot *slot)
+{
+	int ret = sclp_pci_configure(slot->zdev->fid);
+
+	zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, ret);
+	if (!ret)
+		slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
+
+	return ret;
+}
+
+static inline int slot_deconfigure(struct slot *slot)
+{
+	int ret = sclp_pci_deconfigure(slot->zdev->fid);
+
+	zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, ret);
+	if (!ret)
+		slot->zdev->state = ZPCI_FN_STATE_STANDBY;
+
+	return ret;
+}
+
 static int enable_slot(struct hotplug_slot *hotplug_slot)
 {
 	struct slot *slot = hotplug_slot->private;
@@ -49,14 +71,23 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
 	if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
 		return -EIO;
 
-	rc = sclp_pci_configure(slot->zdev->fid);
-	zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, rc);
-	if (!rc) {
-		slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
-		/* automatically scan the device after is was configured */
-		zpci_enable_device(slot->zdev);
-		zpci_scan_device(slot->zdev);
-	}
+	rc = slot_configure(slot);
+	if (rc)
+		return rc;
+
+	rc = zpci_enable_device(slot->zdev);
+	if (rc)
+		goto out_deconfigure;
+
+	slot->zdev->state = ZPCI_FN_STATE_ONLINE;
+
+	pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN);
+	pci_bus_add_devices(slot->zdev->bus);
+
+	return rc;
+
+out_deconfigure:
+	slot_deconfigure(slot);
 	return rc;
 }
 
@@ -68,17 +99,14 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
 	if (!zpci_fn_configured(slot->zdev->state))
 		return -EIO;
 
+	if (slot->zdev->pdev)
+		pci_stop_and_remove_bus_device(slot->zdev->pdev);
+
 	rc = zpci_disable_device(slot->zdev);
 	if (rc)
 		return rc;
-	/* TODO: we rely on the user to unbind/remove the device, is that plausible
-	 *	 or do we need to trigger that here?
-	 */
-	rc = sclp_pci_deconfigure(slot->zdev->fid);
-	zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, rc);
-	if (!rc)
-		slot->zdev->state = ZPCI_FN_STATE_STANDBY;
-	return rc;
+
+	return slot_deconfigure(slot);
 }
 
 static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)

+ 10 - 0
drivers/pci/pci.c

@@ -1334,6 +1334,16 @@ int __weak pcibios_add_device (struct pci_dev *dev)
 	return 0;
 }
 
+/**
+ * pcibios_release_device - provide arch specific hooks when releasing device dev
+ * @dev: the PCI device being released
+ *
+ * Permits the platform to provide architecture specific functionality when
+ * devices are released. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+void __weak pcibios_release_device(struct pci_dev *dev) {}
+
 /**
  * pcibios_disable_device - disable arch specific PCI resources for device dev
  * @dev: the PCI device to disable

+ 1 - 0
drivers/pci/probe.c

@@ -1132,6 +1132,7 @@ static void pci_release_dev(struct device *dev)
 	pci_dev = to_pci_dev(dev);
 	pci_release_capabilities(pci_dev);
 	pci_release_of_node(pci_dev);
+	pcibios_release_device(pci_dev);
 	kfree(pci_dev);
 }
 

+ 104 - 11
drivers/s390/block/dasd.c

@@ -38,9 +38,6 @@
  */
 #define DASD_CHANQ_MAX_SIZE 4
 
-#define DASD_SLEEPON_START_TAG	(void *) 1
-#define DASD_SLEEPON_END_TAG	(void *) 2
-
 /*
  * SECTION: exported variables of dasd.c
  */
@@ -1787,11 +1784,11 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
 	list_for_each_safe(l, n, &device->ccw_queue) {
 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
 
-		/* Stop list processing at the first non-final request. */
+		/* Skip any non-final request. */
 		if (cqr->status == DASD_CQR_QUEUED ||
 		    cqr->status == DASD_CQR_IN_IO ||
 		    cqr->status == DASD_CQR_CLEAR_PENDING)
-			break;
+			continue;
 		if (cqr->status == DASD_CQR_ERROR) {
 			__dasd_device_recovery(device, cqr);
 		}
@@ -2183,7 +2180,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
 		    (!dasd_eer_enabled(device))) {
 			cqr->status = DASD_CQR_FAILED;
-			cqr->intrc = -EAGAIN;
+			cqr->intrc = -ENOLINK;
 			continue;
 		}
 		/* Don't try to start requests if device is stopped */
@@ -2402,8 +2399,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
  * Cancels a request that was started with dasd_sleep_on_req.
  * This is useful to timeout requests. The request will be
  * terminated if it is currently in i/o.
- * Returns 1 if the request has been terminated.
- *	   0 if there was no need to terminate the request (not started yet)
+ * Returns 0 if request termination was successful
  *	   negative error code if termination failed
  * Cancellation of a request is an asynchronous operation! The calling
  * function has to wait until the request is properly returned via callback.
@@ -2440,7 +2436,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
 	return rc;
 }
 
-
 /*
  * SECTION: Operations of the dasd_block layer.
  */
@@ -2537,6 +2532,16 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 			__blk_end_request_all(req, -EIO);
 			continue;
 		}
+		if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
+		    (basedev->features & DASD_FEATURE_FAILFAST ||
+		     blk_noretry_request(req))) {
+			DBF_DEV_EVENT(DBF_ERR, basedev,
+				      "Rejecting failfast request %p",
+				      req);
+			blk_start_request(req);
+			__blk_end_request_all(req, -ETIMEDOUT);
+			continue;
+		}
 		cqr = basedev->discipline->build_cp(basedev, block, req);
 		if (IS_ERR(cqr)) {
 			if (PTR_ERR(cqr) == -EBUSY)
@@ -2575,8 +2580,10 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 		 */
 		cqr->callback_data = (void *) req;
 		cqr->status = DASD_CQR_FILLED;
+		req->completion_data = cqr;
 		blk_start_request(req);
 		list_add_tail(&cqr->blocklist, &block->ccw_queue);
+		INIT_LIST_HEAD(&cqr->devlist);
 		dasd_profile_start(block, cqr, req);
 	}
 }
@@ -2590,8 +2597,17 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 	req = (struct request *) cqr->callback_data;
 	dasd_profile_end(cqr->block, cqr, req);
 	status = cqr->block->base->discipline->free_cp(cqr, req);
-	if (status <= 0)
-		error = status ? status : -EIO;
+	if (status < 0)
+		error = status;
+	else if (status == 0) {
+		if (cqr->intrc == -EPERM)
+			error = -EBADE;
+		else if (cqr->intrc == -ENOLINK ||
+			 cqr->intrc == -ETIMEDOUT)
+			error = cqr->intrc;
+		else
+			error = -EIO;
+	}
 	__blk_end_request_all(req, error);
 }
 
@@ -2692,6 +2708,7 @@ static void __dasd_block_start_head(struct dasd_block *block)
 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
 		    (!dasd_eer_enabled(block->base))) {
 			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -ENOLINK;
 			dasd_schedule_block_bh(block);
 			continue;
 		}
@@ -2863,6 +2880,82 @@ static void do_dasd_request(struct request_queue *queue)
 	spin_unlock(&block->queue_lock);
 }
 
+/*
+ * Block timeout callback, called from the block layer
+ *
+ * request_queue lock is held on entry.
+ *
+ * Return values:
+ * BLK_EH_RESET_TIMER if the request should be left running
+ * BLK_EH_NOT_HANDLED if the request is handled or terminated
+ *		      by the driver.
+ */
+enum blk_eh_timer_return dasd_times_out(struct request *req)
+{
+	struct dasd_ccw_req *cqr = req->completion_data;
+	struct dasd_block *block = req->q->queuedata;
+	struct dasd_device *device;
+	int rc = 0;
+
+	if (!cqr)
+		return BLK_EH_NOT_HANDLED;
+
+	device = cqr->startdev ? cqr->startdev : block->base;
+	if (!device->blk_timeout)
+		return BLK_EH_RESET_TIMER;
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      " dasd_times_out cqr %p status %x",
+		      cqr, cqr->status);
+
+	spin_lock(&block->queue_lock);
+	spin_lock(get_ccwdev_lock(device->cdev));
+	cqr->retries = -1;
+	cqr->intrc = -ETIMEDOUT;
+	if (cqr->status >= DASD_CQR_QUEUED) {
+		spin_unlock(get_ccwdev_lock(device->cdev));
+		rc = dasd_cancel_req(cqr);
+	} else if (cqr->status == DASD_CQR_FILLED ||
+		   cqr->status == DASD_CQR_NEED_ERP) {
+		cqr->status = DASD_CQR_TERMINATED;
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	} else if (cqr->status == DASD_CQR_IN_ERP) {
+		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
+
+		list_for_each_entry_safe(searchcqr, nextcqr,
+					 &block->ccw_queue, blocklist) {
+			tmpcqr = searchcqr;
+			while (tmpcqr->refers)
+				tmpcqr = tmpcqr->refers;
+			if (tmpcqr != cqr)
+				continue;
+			/* searchcqr is an ERP request for cqr */
+			searchcqr->retries = -1;
+			searchcqr->intrc = -ETIMEDOUT;
+			if (searchcqr->status >= DASD_CQR_QUEUED) {
+				spin_unlock(get_ccwdev_lock(device->cdev));
+				rc = dasd_cancel_req(searchcqr);
+				spin_lock(get_ccwdev_lock(device->cdev));
+			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
+				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
+				searchcqr->status = DASD_CQR_TERMINATED;
+				rc = 0;
+			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
+				/*
+				 * Shouldn't happen; most recent ERP
+				 * request is at the front of queue
+				 */
+				continue;
+			}
+			break;
+		}
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	dasd_schedule_block_bh(block);
+	spin_unlock(&block->queue_lock);
+
+	return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+}
+
 /*
  * Allocate and initialize request queue and default I/O scheduler.
  */

+ 97 - 0
drivers/s390/block/dasd_devmap.c

@@ -1240,6 +1240,101 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
 
+static ssize_t
+dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_retries);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_retries_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((strict_strtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_RETRIES_MAX)) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+
+	if (val)
+		device->default_retries = val;
+
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(retries, 0644, dasd_retries_show, dasd_retries_store);
+
+static ssize_t
+dasd_timeout_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->blk_timeout);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_timeout_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	struct request_queue *q;
+	unsigned long val, flags;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device) || !device->block)
+		return -ENODEV;
+
+	if ((strict_strtoul(buf, 10, &val) != 0) ||
+	    val > UINT_MAX / HZ) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	q = device->block->request_queue;
+	if (!q) {
+		dasd_put_device(device);
+		return -ENODEV;
+	}
+	spin_lock_irqsave(&device->block->request_queue_lock, flags);
+	if (!val)
+		blk_queue_rq_timed_out(q, NULL);
+	else
+		blk_queue_rq_timed_out(q, dasd_times_out);
+
+	device->blk_timeout = val;
+
+	blk_queue_rq_timeout(q, device->blk_timeout * HZ);
+	spin_unlock_irqrestore(&device->block->request_queue_lock, flags);
+
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(timeout, 0644,
+		   dasd_timeout_show, dasd_timeout_store);
+
 static ssize_t dasd_reservation_policy_show(struct device *dev,
 					    struct device_attribute *attr,
 					    char *buf)
@@ -1350,6 +1445,8 @@ static struct attribute * dasd_attrs[] = {
 	&dev_attr_erplog.attr,
 	&dev_attr_failfast.attr,
 	&dev_attr_expires.attr,
+	&dev_attr_retries.attr,
+	&dev_attr_timeout.attr,
 	&dev_attr_reservation_policy.attr,
 	&dev_attr_last_known_reservation_state.attr,
 	&dev_attr_safe_offline.attr,
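For illustration, the new per-device 'retries' and 'timeout' attributes can be set through sysfs; a small hedged helper is sketched below, where the ccw bus id 0.0.1234 is only an example. Writing 0 to 'timeout' disables block-layer timeouts for the device again:

/* Sketch: set a 60 s block timeout and 10 default retries for DASD 0.0.1234. */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* bus id 0.0.1234 is an example device */
	if (write_attr("/sys/bus/ccw/devices/0.0.1234/timeout", "60"))
		perror("timeout");
	if (write_attr("/sys/bus/ccw/devices/0.0.1234/retries", "10"))
		perror("retries");
	return 0;
}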

+ 6 - 2
drivers/s390/block/dasd_diag.c

@@ -359,6 +359,7 @@ dasd_diag_check_device(struct dasd_device *device)
 	}
 
 	device->default_expires = DIAG_TIMEOUT;
+	device->default_retries = DIAG_MAX_RETRIES;
 
 	/* Figure out position of label block */
 	switch (private->rdc_data.vdev_class) {
@@ -555,7 +556,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
 			recid++;
 		}
 	}
-	cqr->retries = DIAG_MAX_RETRIES;
+	cqr->retries = memdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	if (blk_noretry_request(req) ||
 	    block->base->features & DASD_FEATURE_FAILFAST)
@@ -582,7 +583,10 @@ dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 
 static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
 {
-	cqr->status = DASD_CQR_FILLED;
+	if (cqr->retries < 0)
+		cqr->status = DASD_CQR_FAILED;
+	else
+		cqr->status = DASD_CQR_FILLED;
 };
 
 /* Fill in IOCTL data for device. */

+ 12 - 5
drivers/s390/block/dasd_eckd.c

@@ -1682,6 +1682,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 
 	/* set default timeout */
 	device->default_expires = DASD_EXPIRES;
+	/* set default retry count */
+	device->default_retries = DASD_RETRIES;
+
 	if (private->gneq) {
 		value = 1;
 		for (i = 0; i < private->gneq->timeout.value; i++)
@@ -2378,6 +2381,10 @@ sleep:
 
 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
 {
+	if (cqr->retries < 0) {
+		cqr->status = DASD_CQR_FAILED;
+		return;
+	}
 	cqr->status = DASD_CQR_FILLED;
 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
 		dasd_eckd_reset_ccw_to_base_io(cqr);
@@ -2659,7 +2666,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
 	cqr->lpm = startdev->path_data.ppm;
-	cqr->retries = 256;
+	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
@@ -2834,7 +2841,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
 	cqr->lpm = startdev->path_data.ppm;
-	cqr->retries = 256;
+	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
@@ -2968,7 +2975,7 @@ static int prepare_itcw(struct itcw *itcw,
 
 	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
 		     &pfxdata, sizeof(pfxdata), total_data_size);
-	return IS_ERR(dcw) ? PTR_ERR(dcw) : 0;
+	return PTR_RET(dcw);
 }
 
 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
@@ -3127,7 +3134,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
 	cqr->lpm = startdev->path_data.ppm;
-	cqr->retries = 256;
+	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
@@ -3330,7 +3337,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;
 	cqr->lpm = startdev->path_data.ppm;
-	cqr->retries = 256;
+	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 

+ 8 - 0
drivers/s390/block/dasd_erp.c

@@ -159,6 +159,14 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
 	struct dasd_device *device;
 
 	device = cqr->startdev;
+	if (cqr->intrc == -ETIMEDOUT) {
+		dev_err(&device->cdev->dev, "cqr %p timeout error", cqr);
+		return;
+	}
+	if (cqr->intrc == -ENOLINK) {
+		dev_err(&device->cdev->dev, "cqr %p transport error", cqr);
+		return;
+	}
 	/* dump sense data */
 	if (device->discipline && device->discipline->dump_sense)
 		device->discipline->dump_sense(device, cqr, irb);

+ 8 - 2
drivers/s390/block/dasd_fba.c

@@ -29,6 +29,8 @@
 #endif				/* PRINTK_HEADER */
 #define PRINTK_HEADER "dasd(fba):"
 
+#define FBA_DEFAULT_RETRIES 32
+
 #define DASD_FBA_CCW_WRITE 0x41
 #define DASD_FBA_CCW_READ 0x42
 #define DASD_FBA_CCW_LOCATE 0x43
@@ -167,6 +169,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
 	}
 
 	device->default_expires = DASD_EXPIRES;
+	device->default_retries = FBA_DEFAULT_RETRIES;
 	device->path_data.opm = LPM_ANYPATH;
 
 	readonly = dasd_device_is_ro(device);
@@ -369,7 +372,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
 	cqr->memdev = memdev;
 	cqr->block = block;
 	cqr->expires = memdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->retries = 32;
+	cqr->retries = memdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
@@ -425,7 +428,10 @@ out:
 
 static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
 {
-	cqr->status = DASD_CQR_FILLED;
+	if (cqr->retries < 0)
+		cqr->status = DASD_CQR_FAILED;
+	else
+		cqr->status = DASD_CQR_FILLED;
 };
 
 static int

+ 10 - 0
drivers/s390/block/dasd_int.h

@@ -224,6 +224,8 @@ struct dasd_ccw_req {
 /* default expiration time*/
 #define DASD_EXPIRES	  300
 #define DASD_EXPIRES_MAX  40000000
+#define DASD_RETRIES	  256
+#define DASD_RETRIES_MAX  32768
 
 /* per dasd_ccw_req flags */
 #define DASD_CQR_FLAGS_USE_ERP   0	/* use ERP for this request */
@@ -466,6 +468,9 @@ struct dasd_device {
 
 	/* default expiration time in s */
 	unsigned long default_expires;
+	unsigned long default_retries;
+
+	unsigned long blk_timeout;
 
 	struct dentry *debugfs_dentry;
 	struct dasd_profile profile;
@@ -519,7 +524,10 @@ struct dasd_block {
 #define DASD_FLAG_SUSPENDED	9	/* The device was suspended */
 #define DASD_FLAG_SAFE_OFFLINE	10	/* safe offline processing requested*/
 #define DASD_FLAG_SAFE_OFFLINE_RUNNING	11	/* safe offline running */
+#define DASD_FLAG_ABORTALL	12	/* Abort all noretry requests */
 
+#define DASD_SLEEPON_START_TAG	((void *) 1)
+#define DASD_SLEEPON_END_TAG	((void *) 2)
 
 void dasd_put_device_wake(struct dasd_device *);
 
@@ -660,6 +668,8 @@ void dasd_free_device(struct dasd_device *);
 struct dasd_block *dasd_alloc_block(void);
 void dasd_free_block(struct dasd_block *);
 
+enum blk_eh_timer_return dasd_times_out(struct request *req);
+
 void dasd_enable_device(struct dasd_device *);
 void dasd_set_target_state(struct dasd_device *, int);
 void dasd_kick_device(struct dasd_device *);

+ 59 - 0
drivers/s390/block/dasd_ioctl.c

@@ -140,6 +140,59 @@ static int dasd_ioctl_resume(struct dasd_block *block)
 	return 0;
 }
 
+/*
+ * Abort all failfast I/O on a device.
+ */
+static int dasd_ioctl_abortio(struct dasd_block *block)
+{
+	unsigned long flags;
+	struct dasd_device *base;
+	struct dasd_ccw_req *cqr, *n;
+
+	base = block->base;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (test_and_set_bit(DASD_FLAG_ABORTALL, &base->flags))
+		return 0;
+	DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag set");
+
+	spin_lock_irqsave(&block->request_queue_lock, flags);
+	spin_lock(&block->queue_lock);
+	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
+		if (test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    cqr->callback_data &&
+		    cqr->callback_data != DASD_SLEEPON_START_TAG &&
+		    cqr->callback_data != DASD_SLEEPON_END_TAG) {
+			spin_unlock(&block->queue_lock);
+			blk_abort_request(cqr->callback_data);
+			spin_lock(&block->queue_lock);
+		}
+	}
+	spin_unlock(&block->queue_lock);
+	spin_unlock_irqrestore(&block->request_queue_lock, flags);
+
+	dasd_schedule_block_bh(block);
+	return 0;
+}
+
+/*
+ * Allow I/O on a device
+ */
+static int dasd_ioctl_allowio(struct dasd_block *block)
+{
+	struct dasd_device *base;
+
+	base = block->base;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (test_and_clear_bit(DASD_FLAG_ABORTALL, &base->flags))
+		DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag unset");
+
+	return 0;
+}
+
 /*
  * performs formatting of _device_ according to _fdata_
  * Note: The discipline's format_function is assumed to deliver formatting
@@ -458,6 +511,12 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
 	case BIODASDRESUME:
 		rc = dasd_ioctl_resume(block);
 		break;
+	case BIODASDABORTIO:
+		rc = dasd_ioctl_abortio(block);
+		break;
+	case BIODASDALLOWIO:
+		rc = dasd_ioctl_allowio(block);
+		break;
 	case BIODASDFMT:
 		rc = dasd_ioctl_format(bdev, argp);
 		break;

+ 1 - 1
drivers/s390/char/Makefile

@@ -3,7 +3,7 @@
 #
 
 obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
-	 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o
+	 sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o
 
 obj-$(CONFIG_TN3270) += raw3270.o
 obj-$(CONFIG_TN3270_CONSOLE) += con3270.o

+ 80 - 6
drivers/s390/char/sclp.c

@@ -50,11 +50,42 @@ static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 /* Suspend request */
 static DECLARE_COMPLETION(sclp_request_queue_flushed);
 
+/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
+int sclp_console_pages = SCLP_CONSOLE_PAGES;
+/* Flag to indicate if buffer pages are dropped on buffer full condition */
+int sclp_console_drop = 0;
+/* Number of times the console dropped buffer pages */
+unsigned long sclp_console_full;
+
 static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
 {
 	complete(&sclp_request_queue_flushed);
 }
 
+static int __init sclp_setup_console_pages(char *str)
+{
+	int pages, rc;
+
+	rc = kstrtoint(str, 0, &pages);
+	if (!rc && pages >= SCLP_CONSOLE_PAGES)
+		sclp_console_pages = pages;
+	return 1;
+}
+
+__setup("sclp_con_pages=", sclp_setup_console_pages);
+
+static int __init sclp_setup_console_drop(char *str)
+{
+	int drop, rc;
+
+	rc = kstrtoint(str, 0, &drop);
+	if (!rc && drop)
+		sclp_console_drop = 1;
+	return 1;
+}
+
+__setup("sclp_con_drop=", sclp_setup_console_drop);
+
 static struct sclp_req sclp_suspend_req;
 
 /* Timer for request retries. */
@@ -117,14 +148,19 @@ static int sclp_init(void);
 int
 sclp_service_call(sclp_cmdw_t command, void *sccb)
 {
-	int cc;
+	int cc = 4; /* Initialize for program check handling */
 
 	asm volatile(
-		"	.insn	rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
+		"0:	.insn	rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
+		"1:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"2:\n"
+		EX_TABLE(0b, 2b)
+		EX_TABLE(1b, 2b)
+		: "+&d" (cc) : "d" (command), "a" (__pa(sccb))
 		: "cc", "memory");
+	if (cc == 4)
+		return -EINVAL;
 	if (cc == 3)
 		return -EIO;
 	if (cc == 2)
@@ -1013,11 +1049,47 @@ static const struct dev_pm_ops sclp_pm_ops = {
 	.restore	= sclp_restore,
 };
 
+static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
+{
+	return sprintf(buf, "%i\n", sclp_console_pages);
+}
+
+static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);
+
+static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
+{
+	return sprintf(buf, "%i\n", sclp_console_drop);
+}
+
+static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);
+
+static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
+{
+	return sprintf(buf, "%lu\n", sclp_console_full);
+}
+
+static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);
+
+static struct attribute *sclp_drv_attrs[] = {
+	&driver_attr_con_pages.attr,
+	&driver_attr_con_drop.attr,
+	&driver_attr_con_full.attr,
+	NULL,
+};
+static struct attribute_group sclp_drv_attr_group = {
+	.attrs = sclp_drv_attrs,
+};
+static const struct attribute_group *sclp_drv_attr_groups[] = {
+	&sclp_drv_attr_group,
+	NULL,
+};
+
 static struct platform_driver sclp_pdrv = {
 	.driver = {
 		.name	= "sclp",
 		.owner	= THIS_MODULE,
 		.pm	= &sclp_pm_ops,
+		.groups = sclp_drv_attr_groups,
 	},
 };
 
@@ -1096,10 +1168,12 @@ static __init int sclp_initcall(void)
 	rc = platform_driver_register(&sclp_pdrv);
 	if (rc)
 		return rc;
+
 	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
-	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+	rc = PTR_RET(sclp_pdev);
 	if (rc)
 		goto fail_platform_driver_unregister;
+
 	rc = atomic_notifier_chain_register(&panic_notifier_list,
 					    &sclp_on_panic_nb);
 	if (rc)

+ 6 - 1
drivers/s390/char/sclp.h

@@ -15,7 +15,7 @@
 
 /* maximum number of pages concerning our own memory management */
 #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
-#define MAX_CONSOLE_PAGES	6
+#define SCLP_CONSOLE_PAGES	6
 
 #define EVTYP_OPCMD		0x01
 #define EVTYP_MSG		0x02
@@ -171,10 +171,15 @@ int sclp_remove_processed(struct sccb_header *sccb);
 int sclp_deactivate(void);
 int sclp_reactivate(void);
 int sclp_service_call(sclp_cmdw_t command, void *sccb);
+int sclp_sync_request(sclp_cmdw_t command, void *sccb);
 
 int sclp_sdias_init(void);
 void sclp_sdias_exit(void);
 
+extern int sclp_console_pages;
+extern int sclp_console_drop;
+extern unsigned long sclp_console_full;
+
 /* useful inlines */
 
 /* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */

+ 10 - 10
drivers/s390/char/sclp_cmd.c

@@ -195,7 +195,7 @@ static void sclp_sync_callback(struct sclp_req *req, void *data)
 	complete(completion);
 }
 
-static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
+int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
 {
 	struct completion completion;
 	struct sclp_req *request;
@@ -270,7 +270,7 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info)
 	if (!sccb)
 		return -ENOMEM;
 	sccb->header.length = sizeof(*sccb);
-	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
+	rc = sclp_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
 	if (rc)
 		goto out;
 	if (sccb->header.response_code != 0x0010) {
@@ -304,7 +304,7 @@ static int do_cpu_configure(sclp_cmdw_t cmd)
 	if (!sccb)
 		return -ENOMEM;
 	sccb->header.length = sizeof(*sccb);
-	rc = do_sync_request(cmd, sccb);
+	rc = sclp_sync_request(cmd, sccb);
 	if (rc)
 		goto out;
 	switch (sccb->header.response_code) {
@@ -374,7 +374,7 @@ static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
 		return -ENOMEM;
 	sccb->header.length = PAGE_SIZE;
 	sccb->rn = rn;
-	rc = do_sync_request(cmd, sccb);
+	rc = sclp_sync_request(cmd, sccb);
 	if (rc)
 		goto out;
 	switch (sccb->header.response_code) {
@@ -429,7 +429,7 @@ static int sclp_attach_storage(u8 id)
 	if (!sccb)
 		return -ENOMEM;
 	sccb->header.length = PAGE_SIZE;
-	rc = do_sync_request(0x00080001 | id << 8, sccb);
+	rc = sclp_sync_request(0x00080001 | id << 8, sccb);
 	if (rc)
 		goto out;
 	switch (sccb->header.response_code) {
@@ -627,7 +627,7 @@ static int __init sclp_detect_standby_memory(void)
 	for (id = 0; id <= sclp_max_storage_id; id++) {
 		memset(sccb, 0, PAGE_SIZE);
 		sccb->header.length = PAGE_SIZE;
-		rc = do_sync_request(0x00040001 | id << 8, sccb);
+		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
 		if (rc)
 			goto out;
 		switch (sccb->header.response_code) {
@@ -668,7 +668,7 @@ static int __init sclp_detect_standby_memory(void)
 	if (rc)
 		goto out;
 	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
-	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+	rc = PTR_RET(sclp_pdev);
 	if (rc)
 		goto out_driver;
 	sclp_add_standby_memory();
@@ -714,7 +714,7 @@ static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
 	sccb->header.length = PAGE_SIZE;
 	sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
 	sccb->aid = fid;
-	rc = do_sync_request(cmd, sccb);
+	rc = sclp_sync_request(cmd, sccb);
 	if (rc)
 		goto out;
 	switch (sccb->header.response_code) {
@@ -771,7 +771,7 @@ static int do_chp_configure(sclp_cmdw_t cmd)
 	if (!sccb)
 		return -ENOMEM;
 	sccb->header.length = sizeof(*sccb);
-	rc = do_sync_request(cmd, sccb);
+	rc = sclp_sync_request(cmd, sccb);
 	if (rc)
 		goto out;
 	switch (sccb->header.response_code) {
@@ -846,7 +846,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
 	if (!sccb)
 		return -ENOMEM;
 	sccb->header.length = sizeof(*sccb);
-	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
+	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
 	if (rc)
 		goto out;
 	if (sccb->header.response_code != 0x0010) {

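With do_sync_request() renamed and exported as sclp_sync_request(), other parts of the sclp driver can issue synchronous SCLP commands without duplicating the completion plumbing. A minimal sketch of the calling convention as it would look inside drivers/s390/char; the helper name and the 0x0020 response check are illustrative, not part of this series:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/page.h>

#include "sclp.h"

/* Issue one SCLP command with a page-sized SCCB and report success. */
static int sclp_issue_simple(sclp_cmdw_t cmd)
{
	struct sccb_header *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->length = PAGE_SIZE;
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	if (sccb->response_code != 0x0020)	/* command-specific check */
		rc = -EIO;
out:
	free_page((unsigned long) sccb);
	return rc;
}
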
+ 30 - 1
drivers/s390/char/sclp_con.c

@@ -129,6 +129,31 @@ sclp_console_timeout(unsigned long data)
 	sclp_conbuf_emit();
 }
 
+/*
+ * Drop the oldest console buffer if sclp_console_drop is set
+ */
+static int
+sclp_console_drop_buffer(void)
+{
+	struct list_head *list;
+	struct sclp_buffer *buffer;
+	void *page;
+
+	if (!sclp_console_drop)
+		return 0;
+	list = sclp_con_outqueue.next;
+	if (sclp_con_queue_running)
+		/* The first element is in I/O */
+		list = list->next;
+	if (list == &sclp_con_outqueue)
+		return 0;
+	list_del(list);
+	buffer = list_entry(list, struct sclp_buffer, list);
+	page = sclp_unmake_buffer(buffer);
+	list_add_tail((struct list_head *) page, &sclp_con_pages);
+	return 1;
+}
+
 /*
  * Writes the given message to S390 system console
  */
@@ -150,9 +175,13 @@ sclp_console_write(struct console *console, const char *message,
 	do {
 		/* make sure we have a console output buffer */
 		if (sclp_conbuf == NULL) {
+			if (list_empty(&sclp_con_pages))
+				sclp_console_full++;
 			while (list_empty(&sclp_con_pages)) {
 				if (sclp_con_suspended)
 					goto out;
+				if (sclp_console_drop_buffer())
+					break;
 				spin_unlock_irqrestore(&sclp_con_lock, flags);
 				sclp_sync_wait();
 				spin_lock_irqsave(&sclp_con_lock, flags);
@@ -297,7 +326,7 @@ sclp_console_init(void)
 		return rc;
 	/* Allocate pages for output buffering */
 	INIT_LIST_HEAD(&sclp_con_pages);
-	for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
+	for (i = 0; i < sclp_console_pages; i++) {
 		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 		list_add_tail(page, &sclp_con_pages);
 	}

+ 144 - 0
drivers/s390/char/sclp_ctl.c

@@ -0,0 +1,144 @@
+/*
+ * IOCTL interface for SCLP
+ *
+ * Copyright IBM Corp. 2012
+ *
+ * Author: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <linux/compat.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <asm/compat.h>
+#include <asm/sclp_ctl.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+/*
+ * Supported command words
+ */
+static unsigned int sclp_ctl_sccb_wlist[] = {
+	0x00400002,
+	0x00410002,
+};
+
+/*
+ * Check if command word is supported
+ */
+static int sclp_ctl_cmdw_supported(unsigned int cmdw)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sclp_ctl_sccb_wlist); i++) {
+		if (cmdw == sclp_ctl_sccb_wlist[i])
+			return 1;
+	}
+	return 0;
+}
+
+static void __user *u64_to_uptr(u64 value)
+{
+	if (is_compat_task())
+		return compat_ptr(value);
+	else
+		return (void __user *)(unsigned long)value;
+}
+
+/*
+ * Start SCLP request
+ */
+static int sclp_ctl_ioctl_sccb(void __user *user_area)
+{
+	struct sclp_ctl_sccb ctl_sccb;
+	struct sccb_header *sccb;
+	int rc;
+
+	if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
+		return -EFAULT;
+	if (!sclp_ctl_cmdw_supported(ctl_sccb.cmdw))
+		return -EOPNOTSUPP;
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+		rc = -EFAULT;
+		goto out_free;
+	}
+	if (sccb->length > PAGE_SIZE || sccb->length < 8) {
+		rc = -EINVAL;
+		goto out_free;
+	}
+	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
+		rc = -EFAULT;
+		goto out_free;
+	}
+	rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
+	if (rc)
+		goto out_free;
+	if (copy_to_user(u64_to_uptr(ctl_sccb.sccb), sccb, sccb->length))
+		rc = -EFAULT;
+out_free:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+/*
+ * SCLP SCCB ioctl function
+ */
+static long sclp_ctl_ioctl(struct file *filp, unsigned int cmd,
+			   unsigned long arg)
+{
+	void __user *argp;
+
+	if (is_compat_task())
+		argp = compat_ptr(arg);
+	else
+		argp = (void __user *) arg;
+	switch (cmd) {
+	case SCLP_CTL_SCCB:
+		return sclp_ctl_ioctl_sccb(argp);
+	default: /* unknown ioctl number */
+		return -ENOTTY;
+	}
+}
+
+/*
+ * File operations
+ */
+static const struct file_operations sclp_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = nonseekable_open,
+	.unlocked_ioctl = sclp_ctl_ioctl,
+	.compat_ioctl = sclp_ctl_ioctl,
+	.llseek = no_llseek,
+};
+
+/*
+ * Misc device definition
+ */
+static struct miscdevice sclp_ctl_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "sclp",
+	.fops = &sclp_ctl_fops,
+};
+
+/*
+ * Register sclp_ctl misc device
+ */
+static int __init sclp_ctl_init(void)
+{
+	return misc_register(&sclp_ctl_device);
+}
+module_init(sclp_ctl_init);
+
+/*
+ * Deregister sclp_ctl misc device
+ */
+static void __exit sclp_ctl_exit(void)
+{
+	misc_deregister(&sclp_ctl_device);
+}
+module_exit(sclp_ctl_exit);

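The new misc device shows up as /dev/sclp and accepts SCLP_CTL_SCCB with a struct sclp_ctl_sccb carrying the command word plus a user pointer to the SCCB. A hedged user-space sketch; the header layout assumed below (16-bit length at offset 0, 16-bit response code at offset 6) mirrors the driver's struct sccb_header, and 0x00400002 is simply one of the whitelisted command words above:

/* Sketch: drive one whitelisted SCLP command through /dev/sclp. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/sclp_ctl.h>	/* struct sclp_ctl_sccb, SCLP_CTL_SCCB */

int main(void)
{
	uint8_t sccb[4096];
	struct sclp_ctl_sccb req;
	int fd, rc;

	memset(sccb, 0, sizeof(sccb));
	*(uint16_t *) sccb = sizeof(sccb);	/* sccb_header.length (assumed layout) */

	fd = open("/dev/sclp", O_RDWR);
	if (fd < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.cmdw = 0x00400002;			/* whitelisted command word */
	req.sccb = (uint64_t) (unsigned long) sccb;
	rc = ioctl(fd, SCLP_CTL_SCCB, &req);
	printf("rc=%d response=0x%04x\n", rc,
	       *(uint16_t *) (sccb + 6));	/* response code (assumed offset) */
	close(fd);
	return rc ? 1 : 0;
}
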
+ 34 - 5
drivers/s390/char/sclp_vt220.c

@@ -362,6 +362,31 @@ sclp_vt220_timeout(unsigned long data)
 
 #define BUFFER_MAX_DELAY	HZ/20
 
+/*
+ * Drop the oldest console buffer if sclp_console_drop is set
+ */
+static int
+sclp_vt220_drop_buffer(void)
+{
+	struct list_head *list;
+	struct sclp_vt220_request *request;
+	void *page;
+
+	if (!sclp_console_drop)
+		return 0;
+	list = sclp_vt220_outqueue.next;
+	if (sclp_vt220_queue_running)
+		/* The first element is in I/O */
+		list = list->next;
+	if (list == &sclp_vt220_outqueue)
+		return 0;
+	list_del(list);
+	request = list_entry(list, struct sclp_vt220_request, list);
+	page = request->sclp_req.sccb;
+	list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+	return 1;
+}
+
 /* 
  * Internal implementation of the write function. Write COUNT bytes of data
  * from memory at BUF
@@ -390,12 +415,16 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
 	do {
 		/* Create an sclp output buffer if none exists yet */
 		if (sclp_vt220_current_request == NULL) {
+			if (list_empty(&sclp_vt220_empty))
+				sclp_console_full++;
 			while (list_empty(&sclp_vt220_empty)) {
-				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
 				if (may_fail || sclp_vt220_suspended)
 					goto out;
-				else
-					sclp_sync_wait();
+				if (sclp_vt220_drop_buffer())
+					break;
+				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+
+				sclp_sync_wait();
 				spin_lock_irqsave(&sclp_vt220_lock, flags);
 			}
 			page = (void *) sclp_vt220_empty.next;
@@ -428,8 +457,8 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
 		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
 		add_timer(&sclp_vt220_timer);
 	}
-	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
 out:
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
 	return overall_written;
 }
 
@@ -803,7 +832,7 @@ sclp_vt220_con_init(void)
 
 	if (!CONSOLE_IS_SCLP)
 		return 0;
-	rc = __sclp_vt220_init(MAX_CONSOLE_PAGES);
+	rc = __sclp_vt220_init(sclp_console_pages);
 	if (rc)
 		return rc;
 	/* Attach linux console */

+ 1 - 1
drivers/s390/char/tape_class.c

@@ -77,7 +77,7 @@ struct tape_class_device *register_tape_dev(
 	tcd->class_device = device_create(tape_class, device,
 					  tcd->char_device->dev, NULL,
 					  "%s", tcd->device_name);
-	rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
+	rc = PTR_RET(tcd->class_device);
 	if (rc)
 		goto fail_with_cdev;
 	rc = sysfs_create_link(

+ 3 - 2
drivers/s390/char/vmwatchdog.c

@@ -112,7 +112,8 @@ static int vmwdt_keepalive(void)
 
 static int vmwdt_disable(void)
 {
-	int ret = __diag288(wdt_cancel, 0, "", 0);
+	char cmd[] = {'\0'};
+	int ret = __diag288(wdt_cancel, 0, cmd, 0);
 	WARN_ON(ret != 0);
 	clear_bit(VMWDT_RUNNING, &vmwdt_is_open);
 	return ret;
@@ -124,7 +125,7 @@ static int __init vmwdt_probe(void)
 	 * so we try initializing it with a NOP command ("BEGIN")
 	 * that won't cause any harm even if the following disable
 	 * fails for some reason */
-	static char __initdata ebc_begin[] = {
+	char ebc_begin[] = {
 		194, 197, 199, 201, 213
 	};
 	if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)

+ 56 - 111
drivers/s390/cio/airq.c

@@ -9,142 +9,87 @@
  */
 
 #include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
 #include <linux/slab.h>
-#include <linux/rcupdate.h>
 
 #include <asm/airq.h>
 #include <asm/isc.h>
 
 #include "cio.h"
 #include "cio_debug.h"
+#include "ioasm.h"
 
-#define NR_AIRQS		32
-#define NR_AIRQS_PER_WORD	sizeof(unsigned long)
-#define NR_AIRQ_WORDS		(NR_AIRQS / NR_AIRQS_PER_WORD)
-
-union indicator_t {
-	unsigned long word[NR_AIRQ_WORDS];
-	unsigned char byte[NR_AIRQS];
-} __attribute__((packed));
-
-struct airq_t {
-	adapter_int_handler_t handler;
-	void *drv_data;
-};
-
-static union indicator_t indicators[MAX_ISC+1];
-static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS];
-
-static int register_airq(struct airq_t *airq, u8 isc)
-{
-	int i;
-
-	for (i = 0; i < NR_AIRQS; i++)
-		if (!cmpxchg(&airqs[isc][i], NULL, airq))
-			return i;
-	return -ENOMEM;
-}
+static DEFINE_SPINLOCK(airq_lists_lock);
+static struct hlist_head airq_lists[MAX_ISC+1];
 
 /**
- * s390_register_adapter_interrupt() - register adapter interrupt handler
- * @handler: adapter handler to be registered
- * @drv_data: driver data passed with each call to the handler
- * @isc: isc for which the handler should be called
+ * register_adapter_interrupt() - register adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
  *
- * Returns:
- *  Pointer to the indicator to be used on success
- *  ERR_PTR() if registration failed
+ * Returns 0 on success, or -EINVAL/-ENOMEM on failure.
  */
-void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
-				      void *drv_data, u8 isc)
+int register_adapter_interrupt(struct airq_struct *airq)
 {
-	struct airq_t *airq;
-	char dbf_txt[16];
-	int ret;
-
-	if (isc > MAX_ISC)
-		return ERR_PTR(-EINVAL);
-	airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
-	if (!airq) {
-		ret = -ENOMEM;
-		goto out;
+	char dbf_txt[32];
+
+	if (!airq->handler || airq->isc > MAX_ISC)
+		return -EINVAL;
+	if (!airq->lsi_ptr) {
+		airq->lsi_ptr = kzalloc(1, GFP_KERNEL);
+		if (!airq->lsi_ptr)
+			return -ENOMEM;
+		airq->flags |= AIRQ_PTR_ALLOCATED;
 	}
-	airq->handler = handler;
-	airq->drv_data = drv_data;
-
-	ret = register_airq(airq, isc);
-out:
-	snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
+	if (!airq->lsi_mask)
+		airq->lsi_mask = 0xff;
+	snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
 	CIO_TRACE_EVENT(4, dbf_txt);
-	if (ret < 0) {
-		kfree(airq);
-		return ERR_PTR(ret);
-	} else
-		return &indicators[isc].byte[ret];
+	isc_register(airq->isc);
+	spin_lock(&airq_lists_lock);
+	hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]);
+	spin_unlock(&airq_lists_lock);
+	return 0;
 }
-EXPORT_SYMBOL(s390_register_adapter_interrupt);
+EXPORT_SYMBOL(register_adapter_interrupt);
 
 /**
- * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
- * @ind: indicator for which the handler is to be unregistered
- * @isc: interruption subclass
+ * unregister_adapter_interrupt - unregister adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
  */
-void s390_unregister_adapter_interrupt(void *ind, u8 isc)
+void unregister_adapter_interrupt(struct airq_struct *airq)
 {
-	struct airq_t *airq;
-	char dbf_txt[16];
-	int i;
+	char dbf_txt[32];
 
-	i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
-	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
+	if (hlist_unhashed(&airq->list))
+		return;
+	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq);
 	CIO_TRACE_EVENT(4, dbf_txt);
-	indicators[isc].byte[i] = 0;
-	airq = xchg(&airqs[isc][i], NULL);
-	/*
-	 * Allow interrupts to complete. This will ensure that the airq handle
-	 * is no longer referenced by any interrupt handler.
-	 */
-	synchronize_sched();
-	kfree(airq);
+	spin_lock(&airq_lists_lock);
+	hlist_del_rcu(&airq->list);
+	spin_unlock(&airq_lists_lock);
+	synchronize_rcu();
+	isc_unregister(airq->isc);
+	if (airq->flags & AIRQ_PTR_ALLOCATED) {
+		kfree(airq->lsi_ptr);
+		airq->lsi_ptr = NULL;
+		airq->flags &= ~AIRQ_PTR_ALLOCATED;
+	}
 }
-EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
-
-#define INDICATOR_MASK	(0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
+EXPORT_SYMBOL(unregister_adapter_interrupt);
 
 void do_adapter_IO(u8 isc)
 {
-	int w;
-	int i;
-	unsigned long word;
-	struct airq_t *airq;
-
-	/*
-	 * Access indicator array in word-sized chunks to minimize storage
-	 * fetch operations.
-	 */
-	for (w = 0; w < NR_AIRQ_WORDS; w++) {
-		word = indicators[isc].word[w];
-		i = w * NR_AIRQS_PER_WORD;
-		/*
-		 * Check bytes within word for active indicators.
-		 */
-		while (word) {
-			if (word & INDICATOR_MASK) {
-				airq = airqs[isc][i];
-				/* Make sure gcc reads from airqs only once. */
-				barrier();
-				if (likely(airq))
-					airq->handler(&indicators[isc].byte[i],
-						      airq->drv_data);
-				else
-					/*
-					 * Reset ill-behaved indicator.
-					 */
-					indicators[isc].byte[i] = 0;
-			}
-			word <<= 8;
-			i++;
-		}
-	}
+	struct airq_struct *airq;
+	struct hlist_head *head;
+
+	head = &airq_lists[isc];
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(airq, head, list)
+		if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
+			airq->handler(airq);
+	rcu_read_unlock();
 }

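The new interface replaces the fixed per-ISC indicator array with caller-owned struct airq_struct descriptors. A minimal sketch of a consumer (the driver name and ISC choice are placeholders; qdio and ap_bus below are the real in-tree conversions):

#include <linux/module.h>
#include <asm/airq.h>
#include <asm/isc.h>

static void demo_airq_handler(struct airq_struct *airq)
{
	/* Called from do_adapter_IO() when (*lsi_ptr & lsi_mask) != 0;
	 * clear the indicator and kick the driver's bottom half here. */
	xchg(airq->lsi_ptr, 0);
}

static struct airq_struct demo_airq = {
	.handler = demo_airq_handler,
	.isc	 = QDIO_AIRQ_ISC,	/* placeholder ISC */
};

static int __init demo_init(void)
{
	/* lsi_ptr is left NULL, so registration allocates the indicator
	 * byte (AIRQ_PTR_ALLOCATED) and lsi_mask defaults to 0xff. */
	return register_adapter_interrupt(&demo_airq);
}

static void __exit demo_exit(void)
{
	unregister_adapter_interrupt(&demo_airq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
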
+ 60 - 0
drivers/s390/cio/chsc.c

@@ -20,6 +20,7 @@
 #include <asm/chpid.h>
 #include <asm/chsc.h>
 #include <asm/crw.h>
+#include <asm/isc.h>
 
 #include "css.h"
 #include "cio.h"
@@ -144,6 +145,65 @@ out:
 	return ret;
 }
 
+/**
+ * chsc_ssqd() - store subchannel QDIO data (SSQD)
+ * @schid: id of the subchannel on which SSQD is performed
+ * @ssqd: request and response block for SSQD
+ *
+ * Returns 0 on success.
+ */
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
+{
+	memset(ssqd, 0, sizeof(*ssqd));
+	ssqd->request.length = 0x0010;
+	ssqd->request.code = 0x0024;
+	ssqd->first_sch = schid.sch_no;
+	ssqd->last_sch = schid.sch_no;
+	ssqd->ssid = schid.ssid;
+
+	if (chsc(ssqd))
+		return -EIO;
+
+	return chsc_error_from_response(ssqd->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_ssqd);
+
+/**
+ * chsc_sadc() - set adapter device controls (SADC)
+ * @schid: id of the subchannel on which SADC is performed
+ * @scssc: request and response block for SADC
+ * @summary_indicator_addr: summary indicator address
+ * @subchannel_indicator_addr: subchannel indicator address
+ *
+ * Returns 0 on success.
+ */
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+	      u64 summary_indicator_addr, u64 subchannel_indicator_addr)
+{
+	memset(scssc, 0, sizeof(*scssc));
+	scssc->request.length = 0x0fe0;
+	scssc->request.code = 0x0021;
+	scssc->operation_code = 0;
+
+	scssc->summary_indicator_addr = summary_indicator_addr;
+	scssc->subchannel_indicator_addr = subchannel_indicator_addr;
+
+	scssc->ks = PAGE_DEFAULT_KEY >> 4;
+	scssc->kc = PAGE_DEFAULT_KEY >> 4;
+	scssc->isc = QDIO_AIRQ_ISC;
+	scssc->schid = schid;
+
+	/* enable the time delay disablement facility */
+	if (css_general_characteristics.aif_tdd)
+		scssc->word_with_d_bit = 0x10000000;
+
+	if (chsc(scssc))
+		return -EIO;
+
+	return chsc_error_from_response(scssc->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_sadc);
+
 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
 {
 	spin_lock_irq(sch->lock);

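chsc_ssqd() and chsc_sadc() move the request setup and response checking into the common chsc code, so callers only provide the subchannel id and a page for the request/response block. A sketch of the SSQD side, following the pattern the qdio code adopts further down; the helper name is illustrative:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>

#include "chsc.h"

/* Query the QDIO SSQD block for one subchannel. */
static int demo_get_ssqd(struct subchannel_id schid,
			 struct qdio_ssqd_desc *desc)
{
	struct chsc_ssqd_area *ssqd;
	int rc;

	ssqd = (void *) get_zeroed_page(GFP_KERNEL);
	if (!ssqd)
		return -ENOMEM;
	/* chsc_ssqd() fills in the request header, issues the CHSC and
	 * converts the response code into an errno value. */
	rc = chsc_ssqd(schid, ssqd);
	if (!rc)
		memcpy(desc, &ssqd->qdio_ssqd, sizeof(*desc));
	free_page((unsigned long) ssqd);
	return rc;
}
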
+ 38 - 6
drivers/s390/cio/chsc.h

@@ -7,14 +7,10 @@
 #include <asm/chpid.h>
 #include <asm/chsc.h>
 #include <asm/schid.h>
+#include <asm/qdio.h>
 
 #define CHSC_SDA_OC_MSS   0x2
 
-struct chsc_header {
-	u16 length;
-	u16 code;
-} __attribute__ ((packed));
-
 #define NR_MEASUREMENT_CHARS 5
 struct cmg_chars {
 	u32 values[NR_MEASUREMENT_CHARS];
@@ -77,6 +73,40 @@ struct chsc_ssd_info {
 	u16 fla[8];
 };
 
+struct chsc_ssqd_area {
+	struct chsc_header request;
+	u16:10;
+	u8 ssid:2;
+	u8 fmt:4;
+	u16 first_sch;
+	u16:16;
+	u16 last_sch;
+	u32:32;
+	struct chsc_header response;
+	u32:32;
+	struct qdio_ssqd_desc qdio_ssqd;
+} __packed;
+
+struct chsc_scssc_area {
+	struct chsc_header request;
+	u16 operation_code;
+	u16:16;
+	u32:32;
+	u32:32;
+	u64 summary_indicator_addr;
+	u64 subchannel_indicator_addr;
+	u32 ks:4;
+	u32 kc:4;
+	u32:21;
+	u32 isc:3;
+	u32 word_with_d_bit;
+	u32:32;
+	struct subchannel_id schid;
+	u32 reserved[1004];
+	struct chsc_header response;
+	u32:32;
+} __packed;
+
 struct chsc_scpd {
 	struct chsc_header request;
 	u32:2;
@@ -116,7 +146,9 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
 void chsc_chp_online(struct chp_id chpid);
 void chsc_chp_offline(struct chp_id chpid);
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
-
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+	      u64 summary_indicator_addr, u64 subchannel_indicator_addr);
 int chsc_error_from_response(int response);
 
 int chsc_siosl(struct subchannel_id schid);

+ 150 - 5
drivers/s390/cio/chsc_sch.c

@@ -29,6 +29,10 @@
 static debug_info_t *chsc_debug_msg_id;
 static debug_info_t *chsc_debug_log_id;
 
+static struct chsc_request *on_close_request;
+static struct chsc_async_area *on_close_chsc_area;
+static DEFINE_MUTEX(on_close_mutex);
+
 #define CHSC_MSG(imp, args...) do {					\
 		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
 	} while (0)
@@ -258,7 +262,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
 		CHSC_LOG(2, "schid");
 		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
 		cc = chsc(chsc_area);
-		sprintf(dbf, "cc:%d", cc);
+		snprintf(dbf, sizeof(dbf), "cc:%d", cc);
 		CHSC_LOG(2, dbf);
 		switch (cc) {
 		case 0:
@@ -287,11 +291,11 @@ static int chsc_async(struct chsc_async_area *chsc_area,
 	return ret;
 }
 
-static void chsc_log_command(struct chsc_async_area *chsc_area)
+static void chsc_log_command(void *chsc_area)
 {
 	char dbf[10];
 
-	sprintf(dbf, "CHSC:%x", chsc_area->header.code);
+	snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
 	CHSC_LOG(0, dbf);
 	CHSC_LOG_HEX(0, chsc_area, 32);
 }
@@ -355,13 +359,106 @@ static int chsc_ioctl_start(void __user *user_area)
 		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
 			ret = -EFAULT;
 out_free:
-	sprintf(dbf, "ret:%d", ret);
+	snprintf(dbf, sizeof(dbf), "ret:%d", ret);
 	CHSC_LOG(0, dbf);
 	kfree(request);
 	free_page((unsigned long)chsc_area);
 	return ret;
 }
 
+static int chsc_ioctl_on_close_set(void __user *user_area)
+{
+	char dbf[13];
+	int ret;
+
+	mutex_lock(&on_close_mutex);
+	if (on_close_chsc_area) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
+	if (!on_close_request) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+	on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+	if (!on_close_chsc_area) {
+		ret = -ENOMEM;
+		goto out_free_request;
+	}
+	if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
+		ret = -EFAULT;
+		goto out_free_chsc;
+	}
+	ret = 0;
+	goto out_unlock;
+
+out_free_chsc:
+	free_page((unsigned long)on_close_chsc_area);
+	on_close_chsc_area = NULL;
+out_free_request:
+	kfree(on_close_request);
+	on_close_request = NULL;
+out_unlock:
+	mutex_unlock(&on_close_mutex);
+	snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
+	CHSC_LOG(0, dbf);
+	return ret;
+}
+
+static int chsc_ioctl_on_close_remove(void)
+{
+	char dbf[13];
+	int ret;
+
+	mutex_lock(&on_close_mutex);
+	if (!on_close_chsc_area) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+	free_page((unsigned long)on_close_chsc_area);
+	on_close_chsc_area = NULL;
+	kfree(on_close_request);
+	on_close_request = NULL;
+	ret = 0;
+out_unlock:
+	mutex_unlock(&on_close_mutex);
+	snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
+	CHSC_LOG(0, dbf);
+	return ret;
+}
+
+static int chsc_ioctl_start_sync(void __user *user_area)
+{
+	struct chsc_sync_area *chsc_area;
+	int ret, ccode;
+
+	chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!chsc_area)
+		return -ENOMEM;
+	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	if (chsc_area->header.code & 0x4000) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+	chsc_log_command(chsc_area);
+	ccode = chsc(chsc_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	free_page((unsigned long)chsc_area);
+	return ret;
+}
+
 static int chsc_ioctl_info_channel_path(void __user *user_cd)
 {
 	struct chsc_chp_cd *cd;
@@ -795,6 +892,8 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd,
 	switch (cmd) {
 	case CHSC_START:
 		return chsc_ioctl_start(argp);
+	case CHSC_START_SYNC:
+		return chsc_ioctl_start_sync(argp);
 	case CHSC_INFO_CHANNEL_PATH:
 		return chsc_ioctl_info_channel_path(argp);
 	case CHSC_INFO_CU:
@@ -809,14 +908,60 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd,
 		return chsc_ioctl_chpd(argp);
 	case CHSC_INFO_DCAL:
 		return chsc_ioctl_dcal(argp);
+	case CHSC_ON_CLOSE_SET:
+		return chsc_ioctl_on_close_set(argp);
+	case CHSC_ON_CLOSE_REMOVE:
+		return chsc_ioctl_on_close_remove();
 	default: /* unknown ioctl number */
 		return -ENOIOCTLCMD;
 	}
 }
 
+static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);
+
+static int chsc_open(struct inode *inode, struct file *file)
+{
+	if (!atomic_dec_and_test(&chsc_ready_for_use)) {
+		atomic_inc(&chsc_ready_for_use);
+		return -EBUSY;
+	}
+	return nonseekable_open(inode, file);
+}
+
+static int chsc_release(struct inode *inode, struct file *filp)
+{
+	char dbf[13];
+	int ret;
+
+	mutex_lock(&on_close_mutex);
+	if (!on_close_chsc_area)
+		goto out_unlock;
+	init_completion(&on_close_request->completion);
+	CHSC_LOG(0, "on_close");
+	chsc_log_command(on_close_chsc_area);
+	spin_lock_irq(&chsc_lock);
+	ret = chsc_async(on_close_chsc_area, on_close_request);
+	spin_unlock_irq(&chsc_lock);
+	if (ret == -EINPROGRESS) {
+		wait_for_completion(&on_close_request->completion);
+		ret = chsc_examine_irb(on_close_request);
+	}
+	snprintf(dbf, sizeof(dbf), "relret:%d", ret);
+	CHSC_LOG(0, dbf);
+	free_page((unsigned long)on_close_chsc_area);
+	on_close_chsc_area = NULL;
+	kfree(on_close_request);
+	on_close_request = NULL;
+out_unlock:
+	mutex_unlock(&on_close_mutex);
+	atomic_inc(&chsc_ready_for_use);
+	return 0;
+}
+
 static const struct file_operations chsc_fops = {
 	.owner = THIS_MODULE,
-	.open = nonseekable_open,
+	.open = chsc_open,
+	.release = chsc_release,
 	.unlocked_ioctl = chsc_ioctl,
 	.compat_ioctl = chsc_ioctl,
 	.llseek = no_llseek,

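Besides the asynchronous CHSC_START path, the character device now offers CHSC_START_SYNC for synchronous commands and CHSC_ON_CLOSE_SET/CHSC_ON_CLOSE_REMOVE to arm a command that is issued when the (now exclusive-open) file descriptor is closed. A hedged sketch of the synchronous path; the /dev/chsc node name is assumed, and the request contents are left as a placeholder since they depend on the command being issued:

/* Sketch: issue a synchronous CHSC command from user space. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/chsc.h>		/* CHSC_START_SYNC */

int main(void)
{
	uint8_t area[4096];
	int fd, rc;

	memset(area, 0, sizeof(area));
	/* Fill in the CHSC request (length and command code first) here;
	 * commands with bit 0x4000 set in the code are rejected. */

	fd = open("/dev/chsc", O_RDWR);	/* node name assumed; a second open gets -EBUSY */
	if (fd < 0)
		return 1;
	rc = ioctl(fd, CHSC_START_SYNC, area);
	printf("CHSC_START_SYNC rc=%d\n", rc);
	close(fd);
	return rc ? 1 : 0;
}
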
+ 28 - 40
drivers/s390/cio/cio.c

@@ -568,7 +568,7 @@ out:
  */
 void __irq_entry do_IRQ(struct pt_regs *regs)
 {
-	struct tpi_info *tpi_info;
+	struct tpi_info *tpi_info = (struct tpi_info *) &regs->int_code;
 	struct subchannel *sch;
 	struct irb *irb;
 	struct pt_regs *old_regs;
@@ -579,46 +579,34 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
 		/* Serve timer interrupts first. */
 		clock_comparator_work();
-	/*
-	 * Get interrupt information from lowcore
-	 */
-	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
-	irb = (struct irb *)&S390_lowcore.irb;
-	do {
-		kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
-		if (tpi_info->adapter_IO) {
-			do_adapter_IO(tpi_info->isc);
-			continue;
-		}
-		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
-		if (!sch) {
-			/* Clear pending interrupt condition. */
-			inc_irq_stat(IRQIO_CIO);
-			tsch(tpi_info->schid, irb);
-			continue;
-		}
-		spin_lock(sch->lock);
-		/* Store interrupt response block to lowcore. */
-		if (tsch(tpi_info->schid, irb) == 0) {
-			/* Keep subchannel information word up to date. */
-			memcpy (&sch->schib.scsw, &irb->scsw,
-				sizeof (irb->scsw));
-			/* Call interrupt handler if there is one. */
-			if (sch->driver && sch->driver->irq)
-				sch->driver->irq(sch);
-			else
-				inc_irq_stat(IRQIO_CIO);
-		} else
+
+	kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
+	irb = (struct irb *) &S390_lowcore.irb;
+	if (tpi_info->adapter_IO) {
+		do_adapter_IO(tpi_info->isc);
+		goto out;
+	}
+	sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
+	if (!sch) {
+		/* Clear pending interrupt condition. */
+		inc_irq_stat(IRQIO_CIO);
+		tsch(tpi_info->schid, irb);
+		goto out;
+	}
+	spin_lock(sch->lock);
+	/* Store interrupt response block to lowcore. */
+	if (tsch(tpi_info->schid, irb) == 0) {
+		/* Keep subchannel information word up to date. */
+		memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw));
+		/* Call interrupt handler if there is one. */
+		if (sch->driver && sch->driver->irq)
+			sch->driver->irq(sch);
+		else
 			inc_irq_stat(IRQIO_CIO);
-		spin_unlock(sch->lock);
-		/*
-		 * Are more interrupts pending?
-		 * If so, the tpi instruction will update the lowcore
-		 * to hold the info for the next interrupt.
-		 * We don't do this for VM because a tpi drops the cpu
-		 * out of the sie which costs more cycles than it saves.
-		 */
-	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
+	} else
+		inc_irq_stat(IRQIO_CIO);
+	spin_unlock(sch->lock);
+out:
 	irq_exit();
 	set_irq_regs(old_regs);
 }

+ 0 - 34
drivers/s390/cio/qdio.h

@@ -140,40 +140,6 @@ struct siga_flag {
 	u8:3;
 } __attribute__ ((packed));
 
-struct chsc_ssqd_area {
-	struct chsc_header request;
-	u16:10;
-	u8 ssid:2;
-	u8 fmt:4;
-	u16 first_sch;
-	u16:16;
-	u16 last_sch;
-	u32:32;
-	struct chsc_header response;
-	u32:32;
-	struct qdio_ssqd_desc qdio_ssqd;
-} __attribute__ ((packed));
-
-struct scssc_area {
-	struct chsc_header request;
-	u16 operation_code;
-	u16:16;
-	u32:32;
-	u32:32;
-	u64 summary_indicator_addr;
-	u64 subchannel_indicator_addr;
-	u32 ks:4;
-	u32 kc:4;
-	u32:21;
-	u32 isc:3;
-	u32 word_with_d_bit;
-	u32:32;
-	struct subchannel_id schid;
-	u32 reserved[1004];
-	struct chsc_header response;
-	u32:32;
-} __attribute__ ((packed));
-
 struct qdio_dev_perf_stat {
 	unsigned int adapter_int;
 	unsigned int qdio_int;

+ 0 - 44
drivers/s390/cio/qdio_main.c

@@ -608,50 +608,6 @@ static inline int contains_aobs(struct qdio_q *q)
 	return !q->is_input_q && q->u.out.use_cq;
 }
 
-static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
-				int i, struct qaob *aob)
-{
-	int tmp;
-
-	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
-			(unsigned long) virt_to_phys(aob));
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
-			(unsigned long) aob->res0[0]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
-			(unsigned long) aob->res0[1]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
-			(unsigned long) aob->res0[2]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
-			(unsigned long) aob->res0[3]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
-			(unsigned long) aob->res0[4]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
-			(unsigned long) aob->res0[5]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
-	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
-	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
-	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
-	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
-	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
-		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
-				(unsigned long) aob->sba[tmp]);
-		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
-				(unsigned long) q->sbal[i]->element[tmp].addr);
-		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
-		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
-				q->sbal[i]->element[tmp].length);
-	}
-	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
-	for (tmp = 0; tmp < 2; ++tmp) {
-		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
-			(unsigned long) aob->res4[tmp]);
-	}
-	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
-	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
-}
-
 static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
 {
 	unsigned char state = 0;

+ 19 - 28
drivers/s390/cio/qdio_setup.c

@@ -254,40 +254,31 @@ int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
 	int rc;
 
 	DBF_EVENT("getssqd:%4x", schid->sch_no);
-	if (irq_ptr != NULL)
-		ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
-	else
+	if (!irq_ptr) {
 		ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
-	memset(ssqd, 0, PAGE_SIZE);
-
-	ssqd->request = (struct chsc_header) {
-		.length = 0x0010,
-		.code	= 0x0024,
-	};
-	ssqd->first_sch = schid->sch_no;
-	ssqd->last_sch = schid->sch_no;
-	ssqd->ssid = schid->ssid;
-
-	if (chsc(ssqd))
-		return -EIO;
-	rc = chsc_error_from_response(ssqd->response.code);
+		if (!ssqd)
+			return -ENOMEM;
+	} else {
+		ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
+	}
+
+	rc = chsc_ssqd(*schid, ssqd);
 	if (rc)
-		return rc;
+		goto out;
 
 	if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
 	    !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
 	    (ssqd->qdio_ssqd.sch != schid->sch_no))
-		return -EINVAL;
-
-	if (irq_ptr != NULL)
-		memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
-		       sizeof(struct qdio_ssqd_desc));
-	else {
-		memcpy(data, &ssqd->qdio_ssqd,
-		       sizeof(struct qdio_ssqd_desc));
+		rc = -EINVAL;
+
+	if (!rc)
+		memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));
+
+out:
+	if (!irq_ptr)
 		free_page((unsigned long)ssqd);
-	}
-	return 0;
+
+	return rc;
 }
 
 void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
@@ -295,7 +286,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
 	unsigned char qdioac;
 	int rc;
 
-	rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL);
+	rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
 	if (rc) {
 		DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
 		DBF_ERROR("rc:%x", rc);

+ 29 - 50
drivers/s390/cio/qdio_thinint.c

@@ -36,8 +36,13 @@ struct indicator_t {
 static LIST_HEAD(tiq_list);
 static DEFINE_MUTEX(tiq_list_lock);
 
-/* adapter local summary indicator */
-static u8 *tiqdio_alsi;
+/* Adapter interrupt definitions */
+static void tiqdio_thinint_handler(struct airq_struct *airq);
+
+static struct airq_struct tiqdio_airq = {
+	.handler = tiqdio_thinint_handler,
+	.isc = QDIO_AIRQ_ISC,
+};
 
 static struct indicator_t *q_indicators;
 
@@ -176,7 +181,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
  * @alsi: pointer to adapter local summary indicator
  * @data: NULL
  */
-static void tiqdio_thinint_handler(void *alsi, void *data)
+static void tiqdio_thinint_handler(struct airq_struct *airq)
 {
 	u32 si_used = clear_shared_ind();
 	struct qdio_q *q;
@@ -208,51 +213,31 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
 
 static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
 {
-	struct scssc_area *scssc_area;
+	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
+	u64 summary_indicator_addr, subchannel_indicator_addr;
 	int rc;
 
-	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
-	memset(scssc_area, 0, PAGE_SIZE);
-
 	if (reset) {
-		scssc_area->summary_indicator_addr = 0;
-		scssc_area->subchannel_indicator_addr = 0;
+		summary_indicator_addr = 0;
+		subchannel_indicator_addr = 0;
 	} else {
-		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
-		scssc_area->subchannel_indicator_addr =
-			virt_to_phys(irq_ptr->dsci);
+		summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
+		subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
 	}
 
-	scssc_area->request = (struct chsc_header) {
-		.length = 0x0fe0,
-		.code	= 0x0021,
-	};
-	scssc_area->operation_code = 0;
-	scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
-	scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
-	scssc_area->isc = QDIO_AIRQ_ISC;
-	scssc_area->schid = irq_ptr->schid;
-
-	/* enable the time delay disablement facility */
-	if (css_general_characteristics.aif_tdd)
-		scssc_area->word_with_d_bit = 0x10000000;
-
-	rc = chsc(scssc_area);
-	if (rc)
-		return -EIO;
-
-	rc = chsc_error_from_response(scssc_area->response.code);
+	rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
+		       subchannel_indicator_addr);
 	if (rc) {
 		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
-			  scssc_area->response.code);
-		DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
-		return rc;
+			  scssc->response.code);
+		goto out;
 	}
 
 	DBF_EVENT("setscind");
-	DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
-	DBF_HEX(&scssc_area->subchannel_indicator_addr,	sizeof(unsigned long));
-	return 0;
+	DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
+	DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
+out:
+	return rc;
 }
 
 /* allocate non-shared indicators and shared indicator */
@@ -272,14 +257,12 @@ void tiqdio_free_memory(void)
 
 int __init tiqdio_register_thinints(void)
 {
-	isc_register(QDIO_AIRQ_ISC);
-	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
-						      NULL, QDIO_AIRQ_ISC);
-	if (IS_ERR(tiqdio_alsi)) {
-		DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
-		tiqdio_alsi = NULL;
-		isc_unregister(QDIO_AIRQ_ISC);
-		return -ENOMEM;
+	int rc;
+
+	rc = register_adapter_interrupt(&tiqdio_airq);
+	if (rc) {
+		DBF_EVENT("RTI:%x", rc);
+		return rc;
 	}
 	return 0;
 }
@@ -312,9 +295,5 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
 void __exit tiqdio_unregister_thinints(void)
 {
 	WARN_ON(!list_empty(&tiq_list));
-
-	if (tiqdio_alsi) {
-		s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
-		isc_unregister(QDIO_AIRQ_ISC);
-	}
+	unregister_adapter_interrupt(&tiqdio_airq);
 }

+ 37 - 27
drivers/s390/crypto/ap_bus.c

@@ -58,7 +58,7 @@ static inline void ap_schedule_poll_timer(void);
 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
 static int ap_device_remove(struct device *dev);
 static int ap_device_probe(struct device *dev);
-static void ap_interrupt_handler(void *unused1, void *unused2);
+static void ap_interrupt_handler(struct airq_struct *airq);
 static void ap_reset(struct ap_device *ap_dev);
 static void ap_config_timeout(unsigned long ptr);
 static int ap_select_domain(void);
@@ -106,7 +106,6 @@ static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
 static struct task_struct *ap_poll_kthread = NULL;
 static DEFINE_MUTEX(ap_poll_thread_mutex);
 static DEFINE_SPINLOCK(ap_poll_timer_lock);
-static void *ap_interrupt_indicator;
 static struct hrtimer ap_poll_timer;
 /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
  * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
@@ -120,13 +119,21 @@ static int ap_suspend_flag;
 static int user_set_domain = 0;
 static struct bus_type ap_bus_type;
 
+/* Adapter interrupt definitions */
+static int ap_airq_flag;
+
+static struct airq_struct ap_airq = {
+	.handler = ap_interrupt_handler,
+	.isc = AP_ISC,
+};
+
 /**
  * ap_using_interrupts() - Returns non-zero if interrupt support is
  * available.
  */
 static inline int ap_using_interrupts(void)
 {
-	return ap_interrupt_indicator != NULL;
+	return ap_airq_flag;
 }
 
 /**
@@ -588,7 +595,7 @@ static int ap_init_queue(ap_qid_t qid)
 		}
 	}
 	if (rc == 0 && ap_using_interrupts()) {
-		rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
+		rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr);
 		/* If interruption mode is supported by the machine,
 		* but an AP can not be enabled for interruption then
 		* the AP will be discarded.    */
@@ -821,13 +828,22 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
 
 static int ap_bus_resume(struct device *dev)
 {
-	int rc = 0;
 	struct ap_device *ap_dev = to_ap_dev(dev);
+	int rc;
 
 	if (ap_suspend_flag) {
 		ap_suspend_flag = 0;
-		if (!ap_interrupts_available())
-			ap_interrupt_indicator = NULL;
+		if (ap_interrupts_available()) {
+			if (!ap_using_interrupts()) {
+				rc = register_adapter_interrupt(&ap_airq);
+				ap_airq_flag = (rc == 0);
+			}
+		} else {
+			if (ap_using_interrupts()) {
+				unregister_adapter_interrupt(&ap_airq);
+				ap_airq_flag = 0;
+			}
+		}
 		ap_query_configuration();
 		if (!user_set_domain) {
 			ap_domain_index = -1;
@@ -848,7 +864,10 @@ static int ap_bus_resume(struct device *dev)
 			tasklet_schedule(&ap_tasklet);
 		if (ap_thread_flag)
 			rc = ap_poll_thread_start();
-	}
+		else
+			rc = 0;
+	} else
+		rc = 0;
 	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
 		spin_lock_bh(&ap_dev->lock);
 		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
@@ -1266,7 +1285,7 @@ out:
 	return rc;
 }
 
-static void ap_interrupt_handler(void *unused1, void *unused2)
+static void ap_interrupt_handler(struct airq_struct *airq)
 {
 	inc_irq_stat(IRQIO_APB);
 	tasklet_schedule(&ap_tasklet);
@@ -1722,7 +1741,7 @@ static void ap_poll_all(unsigned long dummy)
 	 * important that no requests on any AP get lost.
 	 */
 	if (ap_using_interrupts())
-		xchg((u8 *)ap_interrupt_indicator, 0);
+		xchg(ap_airq.lsi_ptr, 0);
 	do {
 		flags = 0;
 		spin_lock(&ap_device_list_lock);
@@ -1795,7 +1814,7 @@ static int ap_poll_thread_start(void)
 	mutex_lock(&ap_poll_thread_mutex);
 	if (!ap_poll_kthread) {
 		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
-		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
+		rc = PTR_RET(ap_poll_kthread);
 		if (rc)
 			ap_poll_kthread = NULL;
 	}
@@ -1881,13 +1900,8 @@ int __init ap_module_init(void)
 		return -ENODEV;
 	}
 	if (ap_interrupts_available()) {
-		isc_register(AP_ISC);
-		ap_interrupt_indicator = s390_register_adapter_interrupt(
-			&ap_interrupt_handler, NULL, AP_ISC);
-		if (IS_ERR(ap_interrupt_indicator)) {
-			ap_interrupt_indicator = NULL;
-			isc_unregister(AP_ISC);
-		}
+		rc = register_adapter_interrupt(&ap_airq);
+		ap_airq_flag = (rc == 0);
 	}
 
 	register_reset_call(&ap_reset_call);
@@ -1904,7 +1918,7 @@ int __init ap_module_init(void)
 
 	/* Create /sys/devices/ap. */
 	ap_root_device = root_device_register("ap");
-	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
+	rc = PTR_RET(ap_root_device);
 	if (rc)
 		goto out_bus;
 
@@ -1955,10 +1969,8 @@ out_bus:
 	bus_unregister(&ap_bus_type);
 out:
 	unregister_reset_call(&ap_reset_call);
-	if (ap_using_interrupts()) {
-		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
-		isc_unregister(AP_ISC);
-	}
+	if (ap_using_interrupts())
+		unregister_adapter_interrupt(&ap_airq);
 	return rc;
 }
 
@@ -1994,10 +2006,8 @@ void ap_module_exit(void)
 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
 	bus_unregister(&ap_bus_type);
 	unregister_reset_call(&ap_reset_call);
-	if (ap_using_interrupts()) {
-		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
-		isc_unregister(AP_ISC);
-	}
+	if (ap_using_interrupts())
+		unregister_adapter_interrupt(&ap_airq);
 }
 
 module_init(ap_module_init);

+ 1 - 1
drivers/s390/net/claw.c

@@ -3348,7 +3348,7 @@ static int __init claw_init(void)
 	}
 	CLAW_DBF_TEXT(2, setup, "init_mod");
 	claw_root_dev = root_device_register("claw");
-	ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
+	ret = PTR_RET(claw_root_dev);
 	if (ret)
 		goto register_err;
 	ret = ccw_driver_register(&claw_ccw_driver);

+ 1 - 1
drivers/s390/net/ctcm_main.c

@@ -1837,7 +1837,7 @@ static int __init ctcm_init(void)
 	if (ret)
 		goto out_err;
 	ctcm_root_dev = root_device_register("ctcm");
-	ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0;
+	ret = PTR_RET(ctcm_root_dev);
 	if (ret)
 		goto register_err;
 	ret = ccw_driver_register(&ctcm_ccw_driver);

+ 1 - 1
drivers/s390/net/lcs.c

@@ -2441,7 +2441,7 @@ __init lcs_init_module(void)
 	if (rc)
 		goto out_err;
 	lcs_root_dev = root_device_register("lcs");
-	rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
+	rc = PTR_RET(lcs_root_dev);
 	if (rc)
 		goto register_err;
 	rc = ccw_driver_register(&lcs_ccw_driver);

+ 1 - 1
drivers/s390/net/qeth_core_main.c

@@ -5705,7 +5705,7 @@ static int __init qeth_core_init(void)
 	if (rc)
 		goto out_err;
 	qeth_core_root_dev = root_device_register("qeth");
-	rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
+	rc = PTR_RET(qeth_core_root_dev);
 	if (rc)
 		goto register_err;
 	qeth_core_header_cache = kmem_cache_create("qeth_hdr",

+ 1 - 0
include/linux/pci.h

@@ -1643,6 +1643,7 @@ void pcibios_set_master(struct pci_dev *dev);
 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
 				 enum pcie_reset_state state);
 int pcibios_add_device(struct pci_dev *dev);
+void pcibios_release_device(struct pci_dev *dev);
 
 #ifdef CONFIG_PCI_MMCONFIG
 void __init pci_mmcfg_early_init(void);