Browse source code

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David S. Miller 13 years ago
commit c90a9bb907
100 changed files with 1262 additions and 719 deletions
  1. +0 -21  Documentation/ABI/testing/sysfs-block-rssd
  2. +46 -85  Documentation/device-mapper/verity.txt
  3. +50 -0  Documentation/prctl/no_new_privs.txt
  4. +6 -0  Documentation/stable_kernel_rules.txt
  5. +2 -2  MAINTAINERS
  6. +1 -1  Makefile
  7. +2 -0  arch/arm/kernel/vmlinux.lds.S
  8. +4 -4  arch/arm/mach-exynos/Kconfig
  9. +4 -2  arch/arm/mach-imx/clk-imx6q.c
  10. +0 -5  arch/arm/mach-omap2/board-flash.c
  11. +4 -1  arch/arm/mach-omap2/clock44xx_data.c
  12. +1 -0  arch/arm/mach-shmobile/board-armadillo800eva.c
  13. +1 -0  arch/arm/mach-shmobile/board-kzm9d.c
  14. +1 -0  arch/arm/mach-shmobile/board-kzm9g.c
  15. +3 -0  arch/arm/mach-shmobile/board-mackerel.c
  16. +4 -4  arch/arm/mach-shmobile/clock-sh73a0.c
  17. +7 -0  arch/arm/mach-shmobile/intc-r8a7779.c
  18. +5 -0  arch/arm/mach-shmobile/platsmp.c
  19. +1 -1  arch/arm/mach-shmobile/setup-sh7372.c
  20. +74 -0  arch/arm/mm/mmu.c
  21. +1 -1  arch/arm/plat-samsung/include/plat/map-s3c.h
  22. +1 -1  arch/arm/plat-samsung/include/plat/watchdog-reset.h
  23. +5 -0  arch/powerpc/include/asm/hw_irq.h
  24. +40 -57  arch/powerpc/kernel/entry_64.S
  25. +1 -1  arch/powerpc/kernel/irq.c
  26. +2 -2  arch/powerpc/kernel/prom_init.c
  27. +1 -1  arch/powerpc/kvm/book3s_hv_rmhandlers.S
  28. +1 -1  arch/powerpc/mm/numa.c
  29. +2 -0  arch/powerpc/net/bpf_jit_64.S
  30. +2 -2  arch/powerpc/platforms/pseries/iommu.c
  31. +1 -1  arch/powerpc/platforms/pseries/processor_idle.c
  32. +1 -1  arch/powerpc/xmon/xmon.c
  33. +1 -1  arch/x86/ia32/ia32_signal.c
  34. +1 -1  arch/x86/include/asm/cpufeature.h
  35. +16 -11  arch/x86/kernel/acpi/boot.c
  36. +19 -6  arch/x86/kernel/cpu/mkcapflags.pl
  37. +1 -1  arch/x86/kernel/cpu/scattered.c
  38. +4 -4  arch/x86/kernel/kgdb.c
  39. +8 -0  arch/x86/kernel/reboot.c
  40. +1 -1  arch/x86/lib/csum-wrappers_64.c
  41. +2 -7  block/blk-cgroup.c
  42. +19 -6  block/blk-core.c
  43. +0 -41  block/blk-timeout.c
  44. +18 -12  block/cfq-iosched.c
  45. +4 -1  block/scsi_ioctl.c
  46. +4 -3  drivers/acpi/acpi_pad.c
  47. +15 -2  drivers/acpi/apei/apei-base.c
  48. +9 -0  drivers/acpi/apei/apei-internal.h
  49. +3 -3  drivers/acpi/apei/ghes.c
  50. +30 -2  drivers/acpi/processor_idle.c
  51. +2 -2  drivers/acpi/sysfs.c
  52. +2 -0  drivers/acpi/video.c
  53. +4 -2  drivers/base/power/main.c
  54. +9 -2  drivers/block/drbd/drbd_bitmap.c
  55. +42 -24  drivers/block/drbd/drbd_req.c
  56. +1 -0  drivers/block/floppy.c
  57. +118 -48  drivers/block/mtip32xx/mtip32xx.c
  58. +4 -1  drivers/block/mtip32xx/mtip32xx.h
  59. +40 -0  drivers/block/umem.c
  60. +2 -0  drivers/block/xen-blkback/common.h
  61. +46 -12  drivers/block/xen-blkfront.c
  62. +13 -15  drivers/clk/clk.c
  63. +24 -3  drivers/gpu/drm/drm_edid.c
  64. +30 -7  drivers/gpu/drm/i915/i915_dma.c
  65. +11 -2  drivers/gpu/drm/radeon/radeon_gart.c
  66. +6 -4  drivers/gpu/drm/radeon/radeon_gem.c
  67. +2 -2  drivers/gpu/drm/radeon/si.c
  68. +2 -2  drivers/hwmon/coretemp.c
  69. +7 -0  drivers/md/dm-thin.c
  70. +5 -3  drivers/md/md.c
  71. +2 -1  drivers/md/multipath.c
  72. +31 -23  drivers/md/persistent-data/dm-space-map-checker.c
  73. +10 -1  drivers/md/persistent-data/dm-space-map-disk.c
  74. +9 -2  drivers/md/persistent-data/dm-transaction-manager.c
  75. +5 -8  drivers/md/raid1.c
  76. +16 -10  drivers/md/raid10.c
  77. +47 -20  drivers/md/raid5.c
  78. +1 -0  drivers/net/ethernet/intel/e1000e/defines.h
  79. +14 -61  drivers/net/ethernet/intel/e1000e/netdev.c
  80. +18 -11  drivers/net/ethernet/intel/igbvf/ethtool.c
  81. +1 -1  drivers/oprofile/oprofile_perf.c
  82. +20 -15  fs/splice.c
  83. +0 -1  include/linux/blkdev.h
  84. +0 -2  include/linux/irq.h
  85. +4 -4  include/linux/splice.h
  86. +4 -0  include/net/sctp/structs.h
  87. +2 -1  include/net/sctp/tsnmap.h
  88. +214 -85  kernel/printk.c
  89. +7 -7  kernel/rcutree.c
  90. +3 -2  kernel/relay.c
  91. +4 -2  kernel/trace/trace.c
  92. +2 -1  mm/shmem.c
  93. +1 -0  net/core/skbuff.c
  94. +12 -0  net/netfilter/ipset/ip_set_core.c
  95. +4 -28  net/netfilter/ipset/ip_set_hash_netiface.c
  96. +7 -7  net/netfilter/ipvs/ip_vs_ctl.c
  97. +3 -1  net/netfilter/nfnetlink.c
  98. +1 -0  net/sctp/associola.c
  99. +5 -0  net/sctp/output.c
  100. +16 -0  net/sctp/sm_make_chunk.c

+ 0 - 21
Documentation/ABI/testing/sysfs-block-rssd

@@ -1,26 +1,5 @@
-What:           /sys/block/rssd*/registers
-Date:           March 2012
-KernelVersion:  3.3
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps below driver information and
-                hardware registers.
-                    - S ACTive
-                    - Command Issue
-                    - Completed
-                    - PORT IRQ STAT
-                    - HOST IRQ STAT
-                    - Allocated
-                    - Commands in Q
-
 What:           /sys/block/rssd*/status
 Date:           April 2012
 KernelVersion:  3.4
 Contact:        Asai Thambi S P <asamymuthupa@micron.com>
 Description:    This is a read-only file. Indicates the status of the device.
-
-What:           /sys/block/rssd*/flags
-Date:           May 2012
-KernelVersion:  3.5
-Contact:        Asai Thambi S P <asamymuthupa@micron.com>
-Description:    This is a read-only file. Dumps the flags in port and driver
-                data structure

+ 46 - 85
Documentation/device-mapper/verity.txt

@@ -7,39 +7,39 @@ This target is read-only.
 
 Construction Parameters
 =======================
-    <version> <dev> <hash_dev> <hash_start>
+    <version> <dev> <hash_dev>
     <data_block_size> <hash_block_size>
     <num_data_blocks> <hash_start_block>
     <algorithm> <digest> <salt>
 
 <version>
-    This is the version number of the on-disk format.
+    This is the type of the on-disk hash format.
 
     0 is the original format used in the Chromium OS.
-	The salt is appended when hashing, digests are stored continuously and
-	the rest of the block is padded with zeros.
+      The salt is appended when hashing, digests are stored continuously and
+      the rest of the block is padded with zeros.
 
     1 is the current format that should be used for new devices.
-	The salt is prepended when hashing and each digest is
-	padded with zeros to the power of two.
+      The salt is prepended when hashing and each digest is
+      padded with zeros to the power of two.
 
 <dev>
-    This is the device containing the data the integrity of which needs to be
+    This is the device containing data, the integrity of which needs to be
     checked.  It may be specified as a path, like /dev/sdaX, or a device number,
     <major>:<minor>.
 
 <hash_dev>
-    This is the device that that supplies the hash tree data.  It may be
+    This is the device that supplies the hash tree data.  It may be
     specified similarly to the device path and may be the same device.  If the
-    same device is used, the hash_start should be outside of the dm-verity
-    configured device size.
+    same device is used, the hash_start should be outside the configured
+    dm-verity device.
 
 <data_block_size>
-    The block size on a data device.  Each block corresponds to one digest on
-    the hash device.
+    The block size on a data device in bytes.
+    Each block corresponds to one digest on the hash device.
 
 <hash_block_size>
-    The size of a hash block.
+    The size of a hash block in bytes.
 
 <num_data_blocks>
     The number of data blocks on the data device.  Additional blocks are
@@ -65,7 +65,7 @@ Construction Parameters
 Theory of operation
 ===================
 
-dm-verity is meant to be setup as part of a verified boot path.  This
+dm-verity is meant to be set up as part of a verified boot path.  This
 may be anything ranging from a boot using tboot or trustedgrub to just
 booting from a known-good device (like a USB drive or CD).
 
@@ -73,20 +73,20 @@ When a dm-verity device is configured, it is expected that the caller
 has been authenticated in some way (cryptographic signatures, etc).
 After instantiation, all hashes will be verified on-demand during
 disk access.  If they cannot be verified up to the root node of the
-tree, the root hash, then the I/O will fail.  This should identify
+tree, the root hash, then the I/O will fail.  This should detect
 tampering with any data on the device and the hash data.
 
 Cryptographic hashes are used to assert the integrity of the device on a
-per-block basis.  This allows for a lightweight hash computation on first read
-into the page cache.  Block hashes are stored linearly-aligned to the nearest
-block the size of a page.
+per-block basis. This allows for a lightweight hash computation on first read
+into the page cache. Block hashes are stored linearly, aligned to the nearest
+block size.
 
 Hash Tree
 ---------
 
 Each node in the tree is a cryptographic hash.  If it is a leaf node, the hash
-is of some block data on disk.  If it is an intermediary node, then the hash is
-of a number of child nodes.
+of some data block on disk is calculated. If it is an intermediary node,
+the hash of a number of child nodes is calculated.
 
 Each entry in the tree is a collection of neighboring nodes that fit in one
 block.  The number is determined based on block_size and the size of the
@@ -110,63 +110,23 @@ alg = sha256, num_blocks = 32768, block_size = 4096
 On-disk format
 ==============
 
-Below is the recommended on-disk format. The verity kernel code does not
-read the on-disk header. It only reads the hash blocks which directly
-follow the header. It is expected that a user-space tool will verify the
-integrity of the verity_header and then call dmsetup with the correct
-parameters. Alternatively, the header can be omitted and the dmsetup
-parameters can be passed via the kernel command-line in a rooted chain
-of trust where the command-line is verified.
+The verity kernel code does not read the verity metadata on-disk header.
+It only reads the hash blocks which directly follow the header.
+It is expected that a user-space tool will verify the integrity of the
+verity header.
 
-The on-disk format is especially useful in cases where the hash blocks
-are on a separate partition. The magic number allows easy identification
-of the partition contents. Alternatively, the hash blocks can be stored
-in the same partition as the data to be verified. In such a configuration
-the filesystem on the partition would be sized a little smaller than
-the full-partition, leaving room for the hash blocks.
-
-struct superblock {
-	uint8_t signature[8]
-		"verity\0\0";
-
-	uint8_t version;
-		1 - current format
-
-	uint8_t data_block_bits;
-		log2(data block size)
-
-	uint8_t hash_block_bits;
-		log2(hash block size)
-
-	uint8_t pad1[1];
-		zero padding
-
-	uint16_t salt_size;
-		big-endian salt size
-
-	uint8_t pad2[2];
-		zero padding
-
-	uint32_t data_blocks_hi;
-		big-endian high 32 bits of the 64-bit number of data blocks
-
-	uint32_t data_blocks_lo;
-		big-endian low 32 bits of the 64-bit number of data blocks
-
-	uint8_t algorithm[16];
-		cryptographic algorithm
-
-	uint8_t salt[384];
-		salt (the salt size is specified above)
-
-	uint8_t pad3[88];
-		zero padding to 512-byte boundary
-}
+Alternatively, the header can be omitted and the dmsetup parameters can
+be passed via the kernel command-line in a rooted chain of trust where
+the command-line is verified.
 
 Directly following the header (and with sector number padded to the next hash
 block boundary) are the hash blocks which are stored a depth at a time
 (starting from the root), sorted in order of increasing index.
 
+The full specification of kernel parameters and on-disk metadata format
+is available at the cryptsetup project's wiki page
+  http://code.google.com/p/cryptsetup/wiki/DMVerity
+
 Status
 ======
 V (for Valid) is returned if every check performed so far was valid.
@@ -174,21 +134,22 @@ If any check failed, C (for Corruption) is returned.
 
 Example
 =======
-
-Setup a device:
-  dmsetup create vroot --table \
-    "0 2097152 "\
-    "verity 1 /dev/sda1 /dev/sda2 4096 4096 2097152 1 "\
+Set up a device:
+  # dmsetup create vroot --readonly --table \
+    "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
     "4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\
     "1234000000000000000000000000000000000000000000000000000000000000"
 
 A command line tool veritysetup is available to compute or verify
-the hash tree or activate the kernel driver.  This is available from
-the LVM2 upstream repository and may be supplied as a package called
-device-mapper-verity-tools:
-    git://sources.redhat.com/git/lvm2
-    http://sourceware.org/git/?p=lvm2.git
-    http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/verity?cvsroot=lvm2
-
-veritysetup -a vroot /dev/sda1 /dev/sda2 \
-	4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+the hash tree or activate the kernel device. This is available from
+the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
+(as a libcryptsetup extension).
+
+Create hash on the device:
+  # veritysetup format /dev/sda1 /dev/sda2
+  ...
+  Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+
+Activate the device:
+  # veritysetup create vroot /dev/sda1 /dev/sda2 \
+    4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
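
To make the hash-tree sizing in the text above concrete, here is a small
worked example (illustrative only, not part of the commit; it assumes
sha256's 32-byte digest and the alg = sha256, num_blocks = 32768,
block_size = 4096 case referenced in the hunk header above):

  digests per hash block = 4096 / 32 = 128
  level 0 (hashes of the data blocks): 32768 / 128 = 256 blocks
  level 1 (hashes of level 0):         256 / 128   = 2 blocks
  level 2 (the root):                  1 block

So that device needs 256 + 2 + 1 = 259 hash blocks in total, stored a
depth at a time starting from the root, as described above.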

+ 50 - 0
Documentation/prctl/no_new_privs.txt

@@ -0,0 +1,50 @@
+The execve system call can grant a newly-started program privileges that
+its parent did not have.  The most obvious examples are setuid/setgid
+programs and file capabilities.  To prevent the parent program from
+gaining these privileges as well, the kernel and user code must be
+careful to prevent the parent from doing anything that could subvert the
+child.  For example:
+
+ - The dynamic loader handles LD_* environment variables differently if
+   a program is setuid.
+
+ - chroot is disallowed to unprivileged processes, since it would allow
+   /etc/passwd to be replaced from the point of view of a process that
+   inherited chroot.
+
+ - The exec code has special handling for ptrace.
+
+These are all ad-hoc fixes.  The no_new_privs bit (since Linux 3.5) is a
+new, generic mechanism to make it safe for a process to modify its
+execution environment in a manner that persists across execve.  Any task
+can set no_new_privs.  Once the bit is set, it is inherited across fork,
+clone, and execve and cannot be unset.  With no_new_privs set, execve
+promises not to grant the privilege to do anything that could not have
+been done without the execve call.  For example, the setuid and setgid
+bits will no longer change the uid or gid; file capabilities will not
+add to the permitted set, and LSMs will not relax constraints after
+execve.
+
+Note that no_new_privs does not prevent privilege changes that do not
+involve execve.  An appropriately privileged task can still call
+setuid(2) and receive SCM_RIGHTS datagrams.
+
+There are two main use cases for no_new_privs so far:
+
+ - Filters installed for the seccomp mode 2 sandbox persist across
+   execve and can change the behavior of newly-executed programs.
+   Unprivileged users are therefore only allowed to install such filters
+   if no_new_privs is set.
+
+ - By itself, no_new_privs can be used to reduce the attack surface
+   available to an unprivileged user.  If everything running with a
+   given uid has no_new_privs set, then that uid will be unable to
+   escalate its privileges by directly attacking setuid, setgid, and
+   fcap-using binaries; it will need to compromise something without the
+   no_new_privs bit set first.
+
+In the future, other potentially dangerous kernel features could become
+available to unprivileged tasks if no_new_privs is set.  In principle,
+several options to unshare(2) and clone(2) would be safe when
+no_new_privs is set, and no_new_privs + chroot is considerable less
+dangerous than chroot by itself.
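
A minimal userspace sketch of the bit described above (illustrative only,
not part of the patch; PR_SET_NO_NEW_PRIVS is the prctl(2) constant this
feature introduces, so the fallback define covers pre-3.5 headers):

  #include <stdio.h>
  #include <sys/prctl.h>
  #include <unistd.h>

  #ifndef PR_SET_NO_NEW_PRIVS
  #define PR_SET_NO_NEW_PRIVS 38	/* value from linux/prctl.h */
  #endif

  int main(void)
  {
  	/* One-way switch: inherited across fork/clone/execve, never unset. */
  	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
  		perror("prctl(PR_SET_NO_NEW_PRIVS)");
  		return 1;
  	}
  	/* Any setuid/setgid/fcap binary exec'd from here on runs with the
  	 * caller's credentials instead of gaining privilege. */
  	execlp("id", "id", "-u", (char *)NULL);
  	perror("execlp");
  	return 1;
  }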

+ 6 - 0
Documentation/stable_kernel_rules.txt

@@ -12,6 +12,12 @@ Rules on what kind of patches are accepted, and which ones are not, into the
    marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
    security issue, or some "oh, that's not good" issue.  In short, something
    critical.
+ - Serious issues as reported by a user of a distribution kernel may also
+   be considered if they fix a notable performance or interactivity issue.
+   As these fixes are not as obvious and have a higher risk of a subtle
+   regression they should only be submitted by a distribution kernel
+   maintainer and include an addendum linking to a bugzilla entry if it
+   exists and additional information on the user-visible impact.
  - New device IDs and quirks are also accepted.
  - No "theoretical race condition" issues, unless an explanation of how the
    race can be exploited is also provided.

+ 2 - 2
MAINTAINERS

@@ -4655,8 +4655,8 @@ L:	netfilter@vger.kernel.org
 L:	coreteam@netfilter.org
 W:	http://www.netfilter.org/
 W:	http://www.iptables.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
+T:	git git://1984.lsi.us.es/nf
+T:	git git://1984.lsi.us.es/nf-next
 S:	Supported
 F:	include/linux/netfilter*
 F:	include/linux/netfilter/

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*

+ 2 - 0
arch/arm/kernel/vmlinux.lds.S

@@ -183,7 +183,9 @@ SECTIONS
 	}
 #endif
 
+#ifdef CONFIG_SMP
 	PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
 	__data_loc = ALIGN(4);		/* location in binary */

+ 4 - 4
arch/arm/mach-exynos/Kconfig

@@ -212,7 +212,7 @@ config MACH_SMDKV310
 	select EXYNOS_DEV_SYSMMU
 	select EXYNOS4_DEV_AHCI
 	select SAMSUNG_DEV_KEYPAD
-	select EXYNOS4_DEV_DMA
+	select EXYNOS_DEV_DMA
 	select SAMSUNG_DEV_PWM
 	select EXYNOS4_DEV_USB_OHCI
 	select EXYNOS4_SETUP_FIMD0
@@ -264,7 +264,7 @@ config MACH_UNIVERSAL_C210
 	select S5P_DEV_ONENAND
 	select S5P_DEV_TV
 	select EXYNOS_DEV_SYSMMU
-	select EXYNOS4_DEV_DMA
+	select EXYNOS_DEV_DMA
 	select EXYNOS_DEV_DRM
 	select EXYNOS4_SETUP_FIMD0
 	select EXYNOS4_SETUP_I2C1
@@ -303,7 +303,7 @@ config MACH_NURI
 	select S5P_DEV_MFC
 	select S5P_DEV_USB_EHCI
 	select S5P_SETUP_MIPIPHY
-	select EXYNOS4_DEV_DMA
+	select EXYNOS_DEV_DMA
 	select EXYNOS_DEV_DRM
 	select EXYNOS4_SETUP_FIMC
 	select EXYNOS4_SETUP_FIMD0
@@ -341,7 +341,7 @@ config MACH_ORIGEN
 	select SAMSUNG_DEV_PWM
 	select EXYNOS_DEV_DRM
 	select EXYNOS_DEV_SYSMMU
-	select EXYNOS4_DEV_DMA
+	select EXYNOS_DEV_DMA
 	select EXYNOS4_DEV_USB_OHCI
 	select EXYNOS4_SETUP_FIMD0
 	select EXYNOS4_SETUP_SDHCI

+ 4 - 2
arch/arm/mach-imx/clk-imx6q.c

@@ -152,13 +152,14 @@ enum mx6q_clks {
 	ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
 	usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
 	pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
-	ssi2_ipg, ssi3_ipg, clk_max
+	ssi2_ipg, ssi3_ipg, rom,
+	clk_max
 };
 
 static struct clk *clk[clk_max];
 
 static enum mx6q_clks const clks_init_on[] __initconst = {
-	mmdc_ch0_axi, mmdc_ch1_axi,
+	mmdc_ch0_axi, rom,
 };
 
 int __init mx6q_clocks_init(void)
@@ -364,6 +365,7 @@ int __init mx6q_clocks_init(void)
 	clk[gpmi_bch]     = imx_clk_gate2("gpmi_bch",      "usdhc4",            base + 0x78, 26);
 	clk[gpmi_io]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
 	clk[gpmi_apb]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
+	clk[rom]          = imx_clk_gate2("rom",           "ahb",               base + 0x7c, 0);
 	clk[sata]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
 	clk[sdma]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
 	clk[spba]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);

+ 0 - 5
arch/arm/mach-omap2/board-flash.c

@@ -97,11 +97,6 @@ __init board_onenand_init(struct mtd_partition *onenand_parts,
 
 	gpmc_onenand_init(&board_onenand_data);
 }
-#else
-void
-__init board_onenand_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
-{
-}
 #endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
 
 #if defined(CONFIG_MTD_NAND_OMAP2) || \

+ 4 - 1
arch/arm/mach-omap2/clock44xx_data.c

@@ -3417,9 +3417,12 @@ int __init omap4xxx_clk_init(void)
 	if (cpu_is_omap443x()) {
 		cpu_mask = RATE_IN_4430;
 		cpu_clkflg = CK_443X;
-	} else if (cpu_is_omap446x()) {
+	} else if (cpu_is_omap446x() || cpu_is_omap447x()) {
 		cpu_mask = RATE_IN_4460 | RATE_IN_4430;
 		cpu_clkflg = CK_446X | CK_443X;
+
+		if (cpu_is_omap447x())
+			pr_warn("WARNING: OMAP4470 clock data incomplete!\n");
 	} else {
 		return 0;
 	}

+ 1 - 0
arch/arm/mach-shmobile/board-armadillo800eva.c

@@ -779,6 +779,7 @@ DT_MACHINE_START(ARMADILLO800EVA_DT, "armadillo800eva")
 	.init_irq	= r8a7740_init_irq,
 	.handle_irq	= shmobile_handle_irq_intc,
 	.init_machine	= eva_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 	.dt_compat	= eva_boards_compat_dt,
 MACHINE_END

+ 1 - 0
arch/arm/mach-shmobile/board-kzm9d.c

@@ -80,6 +80,7 @@ DT_MACHINE_START(KZM9D_DT, "kzm9d")
 	.init_irq	= emev2_init_irq,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= kzm9d_add_standard_devices,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 	.dt_compat	= kzm9d_boards_compat_dt,
 MACHINE_END

+ 1 - 0
arch/arm/mach-shmobile/board-kzm9g.c

@@ -455,6 +455,7 @@ DT_MACHINE_START(KZM9G_DT, "kzm9g")
 	.init_irq	= sh73a0_init_irq,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= kzm_init,
+	.init_late	= shmobile_init_late,
 	.timer		= &shmobile_timer,
 	.dt_compat	= kzm9g_boards_compat_dt,
 MACHINE_END

+ 3 - 0
arch/arm/mach-shmobile/board-mackerel.c

@@ -1512,6 +1512,9 @@ static void __init mackerel_init(void)
 	gpio_request(GPIO_FN_SDHID0_1, NULL);
 	gpio_request(GPIO_FN_SDHID0_0, NULL);
 
+	/* SDHI0 PORT172 card-detect IRQ26 */
+	gpio_request(GPIO_FN_IRQ26_172, NULL);
+
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 	/* enable SDHI1 */
 	gpio_request(GPIO_FN_SDHICMD1, NULL);

+ 4 - 4
arch/arm/mach-shmobile/clock-sh73a0.c

@@ -475,9 +475,9 @@ static struct clk *late_main_clks[] = {
 
 enum { MSTP001,
 	MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
-	MSTP219,
+	MSTP219, MSTP218,
 	MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-	MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
+	MSTP331, MSTP329, MSTP325, MSTP323,
 	MSTP314, MSTP313, MSTP312, MSTP311,
 	MSTP303, MSTP302, MSTP301, MSTP300,
 	MSTP411, MSTP410, MSTP403,
@@ -497,6 +497,7 @@ static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
 	[MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
 	[MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
+	[MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */
 	[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
 	[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
 	[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -508,7 +509,6 @@ static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
 	[MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
 	[MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
-	[MSTP318] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* SY-DMAC */
 	[MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
 	[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
 	[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
@@ -552,6 +552,7 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
 	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
 	CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
+	CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */
 	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
 	CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
 	CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
@@ -563,7 +564,6 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
 	CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
 	CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
-	CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP318]), /* SY-DMAC */
 	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
 	CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
 	CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */

+ 7 - 0
arch/arm/mach-shmobile/intc-r8a7779.c

@@ -35,6 +35,9 @@
 #define INT2SMSKCR3 0xfe7822ac
 #define INT2SMSKCR4 0xfe7822b0
 
+#define INT2NTSR0 0xfe700060
+#define INT2NTSR1 0xfe700064
+
 static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
 {
 	return 0; /* always allow wakeup */
@@ -49,6 +52,10 @@ void __init r8a7779_init_irq(void)
 	gic_init(0, 29, gic_dist_base, gic_cpu_base);
 	gic_arch_extn.irq_set_wake = r8a7779_set_wake;
 
+	/* route all interrupts to ARM */
+	__raw_writel(0xffffffff, INT2NTSR0);
+	__raw_writel(0x3fffffff, INT2NTSR1);
+
 	/* unmask all known interrupts in INTCS2 */
 	__raw_writel(0xfffffff0, INT2SMSKCR0);
 	__raw_writel(0xfff7ffff, INT2SMSKCR1);

+ 5 - 0
arch/arm/mach-shmobile/platsmp.c

@@ -25,7 +25,12 @@
 #define is_sh73a0() (machine_is_ag5evm() || machine_is_kota2() || \
 			of_machine_is_compatible("renesas,sh73a0"))
 #define is_r8a7779() machine_is_marzen()
+
+#ifdef CONFIG_ARCH_EMEV2
 #define is_emev2() of_machine_is_compatible("renesas,emev2")
+#else
+#define is_emev2() (0)
+#endif
 
 static unsigned int __init shmobile_smp_get_core_count(void)
 {

+ 1 - 1
arch/arm/mach-shmobile/setup-sh7372.c

@@ -484,7 +484,7 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
 	},
 };
 
-#define SH7372_CHCLR 0x220
+#define SH7372_CHCLR (0x220 - 0x20)
 
 static const struct sh_dmae_channel sh7372_dmae_channels[] = {
 	{

+ 74 - 0
arch/arm/mm/mmu.c

@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	}
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = SECTION_SIZE;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = pmd_empty_section_gap;
+	vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr, next = 0;
+	pmd_t *pmd;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		if (addr < next)
+			continue;
+
+		/*
+		 * Check if this vm starts on an odd section boundary.
+		 * If so and the first section entry for this PMD is free
+		 * then we block the corresponding virtual address.
+		 */
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr);
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr & PMD_MASK);
+		}
+
+		/*
+		 * Then check if this vm ends on an odd section boundary.
+		 * If so and the second section entry for this PMD is empty
+		 * then we block the corresponding virtual address.
+		 */
+		addr += vm->size;
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr) + 1;
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr);
+		}
+
+		/* no need to look at any vm entry until we hit the next PMD */
+		next = (addr + PMD_SIZE - 1) & PMD_MASK;
+	}
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 	if (mdesc->map_io)
 		mdesc->map_io();
+	fill_pmd_gaps();
 
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a

+ 1 - 1
arch/arm/plat-samsung/include/plat/map-s3c.h

@@ -22,7 +22,7 @@
 #define S3C24XX_VA_WATCHDOG	S3C_VA_WATCHDOG
 
 #define S3C2412_VA_SSMC		S3C_ADDR_CPU(0x00000000)
-#define S3C2412_VA_EBI		S3C_ADDR_CPU(0x00010000)
+#define S3C2412_VA_EBI		S3C_ADDR_CPU(0x00100000)
 
 #define S3C2410_PA_UART		(0x50000000)
 #define S3C24XX_PA_UART		S3C2410_PA_UART

+ 1 - 1
arch/arm/plat-samsung/include/plat/watchdog-reset.h

@@ -25,7 +25,7 @@ static inline void arch_wdt_reset(void)
 
 	__raw_writel(0, S3C2410_WTCON);	  /* disable watchdog, to be safe  */
 
-	if (s3c2410_wdtclk)
+	if (!IS_ERR(s3c2410_wdtclk))
 		clk_enable(s3c2410_wdtclk);
 
 	/* put initial values into count and data */

+ 5 - 0
arch/powerpc/include/asm/hw_irq.h

@@ -103,6 +103,11 @@ static inline void hard_irq_disable(void)
 /* include/linux/interrupt.h needs hard_irq_disable to be a macro */
 #define hard_irq_disable	hard_irq_disable
 
+static inline bool lazy_irq_pending(void)
+{
+	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
 /*
  * This is called by asynchronous interrupts to conditionally
  * re-enable hard interrupts when soft-disabled after having

+ 40 - 57
arch/powerpc/kernel/entry_64.S

@@ -558,27 +558,54 @@ _GLOBAL(ret_from_except_lite)
 	mtmsrd	r10,1		  /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PREEMPT
 	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
-	li	r0,_TIF_NEED_RESCHED	/* bits to check */
 	ld	r3,_MSR(r1)
 	ld	r4,TI_FLAGS(r9)
-	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
-	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
-	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
-	bne	do_work
-
-#else /* !CONFIG_PREEMPT */
-	ld	r3,_MSR(r1)	/* Returning to user mode? */
 	andi.	r3,r3,MSR_PR
-	beq	restore		/* if not, just restore regs and return */
+	beq	resume_kernel
 
 	/* Check current_thread_info()->flags */
+	andi.	r0,r4,_TIF_USER_WORK_MASK
+	beq	restore
+
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq	1f
+	bl	.restore_interrupts
+	bl	.schedule
+	b	.ret_from_except_lite
+
+1:	bl	.save_nvgprs
+	bl	.restore_interrupts
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_notify_resume
+	b	.ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+	/* Check if we need to preempt */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq+	restore
+	/* Check that preempt_count() == 0 and interrupts are enabled */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	cr1,r8,0
+	ld	r0,SOFTE(r1)
+	cmpdi	r0,0
+	crandc	eq,cr1*4+eq,eq
+	bne	restore
+
+	/*
+	 * Here we are preempting the current task. We want to make
+	 * sure we are soft-disabled first
+	 */
+	SOFT_DISABLE_INTS(r3,r4)
+1:	bl	.preempt_schedule_irq
+
+	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_USER_WORK_MASK
-	bne	do_work
-#endif /* !CONFIG_PREEMPT */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	bne	1b
+#endif /* CONFIG_PREEMPT */
 
 	.globl	fast_exc_return_irq
 fast_exc_return_irq:
@@ -759,50 +786,6 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
 
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
-	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
-	bne	user_work
-	/* Check that preempt_count() == 0 and interrupts are enabled */
-	lwz	r8,TI_PREEMPT(r9)
-	cmpwi	cr1,r8,0
-	ld	r0,SOFTE(r1)
-	cmpdi	r0,0
-	crandc	eq,cr1*4+eq,eq
-	bne	restore
-
-	/*
-	 * Here we are preempting the current task. We want to make
-	 * sure we are soft-disabled first
-	 */
-	SOFT_DISABLE_INTS(r3,r4)
-1:	bl	.preempt_schedule_irq
-
-	/* Re-test flags and eventually loop */
-	clrrdi	r9,r1,THREAD_SHIFT
-	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	bne	1b
-	b	restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	1f
-	bl	.restore_interrupts
-	bl	.schedule
-	b	.ret_from_except_lite
-
-1:	bl	.save_nvgprs
-	bl	.restore_interrupts
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_notify_resume
-	b	.ret_from_except
-
 unrecov_restore:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.unrecoverable_exception

+ 1 - 1
arch/powerpc/kernel/irq.c

@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * NOTE: This is called with interrupts hard disabled but not marked
  * as such in paca->irq_happened, so we need to resync this.
  */
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
 {
 	if (irqs_disabled()) {
 		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

+ 2 - 2
arch/powerpc/kernel/prom_init.c

@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {
 
 extern char opal_secondary_entry;
 
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
 {
 	long rc;
 
@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
 	prom_debug("prom_opal_hold_cpus: end...\n");
 }
 
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
 {
 	struct opal_secondary_data *data = &RELOC(opal_secondary_data);
 	struct opal_takeover_args *args = &data->args;

+ 1 - 1
arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	lwz	r3,VCORE_NAPPING_THREADS(r5)
 	lwz	r4,VCPU_PTID(r9)
 	li	r0,1
-	sldi	r0,r0,r4
+	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */

+ 1 - 1
arch/powerpc/mm/numa.c

@@ -635,7 +635,7 @@ static inline int __init read_usm_ranges(const u32 **usm)
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-	const u32 *dm, *usm;
+	const u32 *uninitialized_var(dm), *usm;
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
 	unsigned long lmb_size, base, size, sz;
 	int nid;

+ 2 - 0
arch/powerpc/net/bpf_jit_64.S

@@ -105,6 +105,7 @@ sk_load_byte_msh_positive_offset:
 	mr	r4, r_addr;					\
 	li	r6, SIZE;					\
 	bl	skb_copy_bits;					\
+	nop;							\
 	/* R3 = 0 on success */					\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
 	ld	r0, 16(r1);					\
@@ -156,6 +157,7 @@ bpf_slow_path_byte_msh:
 	mr	r4, r_addr;					\
 	li	r5, SIZE;					\
 	bl	bpf_internal_load_pointer_neg_helper;		\
+	nop;							\
 	/* R3 != 0 on success */				\
 	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
 	ld	r0, 16(r1);					\

+ 2 - 2
arch/powerpc/platforms/pseries/iommu.c

@@ -106,7 +106,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
 		tcep++;
 	}
 
-	if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
 		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 	return 0;
 }
@@ -121,7 +121,7 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
 	while (npages--)
 		*(tcep++) = 0;
 
-	if (tbl->it_type == TCE_PCI_SWINV_FREE)
+	if (tbl->it_type & TCE_PCI_SWINV_FREE)
 		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
 }
 

+ 1 - 1
arch/powerpc/platforms/pseries/processor_idle.c

@@ -106,7 +106,7 @@ static void check_and_cede_processor(void)
 	 * we first hard disable then check.
 	 */
 	hard_irq_disable();
-	if (get_paca()->irq_happened == 0)
+	if (!lazy_irq_pending())
 		cede_processor();
 }
 

+ 1 - 1
arch/powerpc/xmon/xmon.c

@@ -971,7 +971,7 @@ static int cpu_cmd(void)
 		/* print cpus waiting or in xmon */
 		printf("cpus stopped:");
 		count = 0;
-		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+		for_each_possible_cpu(cpu) {
 			if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
 				if (count == 0)
 					printf(" %x", cpu);

+ 1 - 1
arch/x86/ia32/ia32_signal.c

@@ -38,7 +38,7 @@
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
 	int err = 0;
-	bool ia32 = is_ia32_task();
+	bool ia32 = test_thread_flag(TIF_IA32);
 
 	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
 		return -EFAULT;

+ 1 - 1
arch/x86/include/asm/cpufeature.h

@@ -176,7 +176,7 @@
 #define X86_FEATURE_XSAVEOPT	(7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN		(7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS		(7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM	(7*32+ 7) /* Digital Thermal Sensor */
 #define X86_FEATURE_HW_PSTATE	(7*32+ 8) /* AMD HW-PState */
 
 /* Virtualization flags: Linux defined, word 8 */

+ 16 - 11
arch/x86/kernel/acpi/boot.c

@@ -422,12 +422,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
 		return 0;
 	}
 
-	if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+	if (intsrc->source_irq == 0) {
 		if (acpi_skip_timer_override) {
-			printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+			printk(PREFIX "BIOS IRQ0 override ignored.\n");
 			return 0;
 		}
-		if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+
+		if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
+			&& (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
 			intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
 			printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
 		}
@@ -1334,17 +1336,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
 }
 
 /*
- * Force ignoring BIOS IRQ0 pin2 override
+ * Force ignoring BIOS IRQ0 override
  */
 static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 {
-	/*
-	 * The ati_ixp4x0_rev() early PCI quirk should have set
-	 * the acpi_skip_timer_override flag already:
-	 */
 	if (!acpi_skip_timer_override) {
-		WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
-		pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
+		pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
 			d->ident);
 		acpi_skip_timer_override = 1;
 	}
@@ -1438,7 +1435,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
 	 * is enabled.  This input is incorrectly designated the
 	 * ISA IRQ 0 via an interrupt source override even though
 	 * it is wired to the output of the master 8259A and INTIN0
-	 * is not connected at all.  Force ignoring BIOS IRQ0 pin2
+	 * is not connected at all.  Force ignoring BIOS IRQ0
 	 * override in that cases.
 	 */
 	{
@@ -1473,6 +1470,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
 		     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
 		     },
 	 },
+	{
+	 .callback = dmi_ignore_irq0_timer_override,
+	 .ident = "FUJITSU SIEMENS",
+	 .matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+		     },
+	 },
 	{}
 };
 

+ 19 - 6
arch/x86/kernel/cpu/mkcapflags.pl

@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
 #
 # Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
 #
@@ -11,22 +11,35 @@ open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
 print OUT "#include <asm/cpufeature.h>\n\n";
 print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
 
+%features = ();
+$err = 0;
+
 while (defined($line = <IN>)) {
 	if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
 		$macro = $1;
-		$feature = $2;
+		$feature = "\L$2";
 		$tail = $3;
 		if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
-			$feature = $1;
+			$feature = "\L$1";
 		}
 
-		if ($feature ne '') {
-			printf OUT "\t%-32s = \"%s\",\n",
-				"[$macro]", "\L$feature";
+		next if ($feature eq '');
+
+		if ($features{$feature}++) {
+			print STDERR "$in: duplicate feature name: $feature\n";
+			$err++;
 		}
+		printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature;
 	}
 }
 print OUT "};\n";
 
 close(IN);
 close(OUT);
+
+if ($err) {
+	unlink($out);
+	exit(1);
+}
+
+exit(0);

+ 1 - 1
arch/x86/kernel/cpu/scattered.c

@@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	const struct cpuid_bit *cb;
 
 	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
-		{ X86_FEATURE_DTS,		CR_EAX, 0, 0x00000006, 0 },
+		{ X86_FEATURE_DTHERM,		CR_EAX, 0, 0x00000006, 0 },
 		{ X86_FEATURE_IDA,		CR_EAX, 1, 0x00000006, 0 },
 		{ X86_FEATURE_ARAT,		CR_EAX, 2, 0x00000006, 0 },
 		{ X86_FEATURE_PLN,		CR_EAX, 4, 0x00000006, 0 },

+ 4 - 4
arch/x86/kernel/kgdb.c

@@ -444,12 +444,12 @@ void kgdb_roundup_cpus(unsigned long flags)
 
 /**
  *	kgdb_arch_handle_exception - Handle architecture specific GDB packets.
- *	@vector: The error vector of the exception that happened.
+ *	@e_vector: The error vector of the exception that happened.
  *	@signo: The signal number of the exception that happened.
  *	@err_code: The error code of the exception that happened.
- *	@remcom_in_buffer: The buffer of the packet we have read.
- *	@remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
- *	@regs: The &struct pt_regs of the current process.
+ *	@remcomInBuffer: The buffer of the packet we have read.
+ *	@remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
+ *	@linux_regs: The &struct pt_regs of the current process.
  *
  *	This function MUST handle the 'c' and 's' command packets,
  *	as well packets to set / remove a hardware breakpoint, if used.

+ 8 - 0
arch/x86/kernel/reboot.c

@@ -451,6 +451,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
 		},
 	},
+	{	/* Handle problems with rebooting on the Precision M6600. */
+		.callback = set_pci_reboot,
+		.ident = "Dell OptiPlex 990",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+		},
+	},
 	{ }
 };
 

+ 1 - 1
arch/x86/lib/csum-wrappers_64.c

@@ -115,7 +115,7 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
  * @src: source address
  * @dst: destination address
  * @len: number of bytes to be copied.
- * @isum: initial sum that is added into the result (32bit unfolded)
+ * @sum: initial sum that is added into the result (32bit unfolded)
  *
  * Returns an 32bit unfolded checksum of the buffer.
  */

+ 2 - 7
block/blk-cgroup.c

@@ -125,12 +125,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)

 		blkg->pd[i] = pd;
 		pd->blkg = blkg;
-	}
-
-	/* invoke per-policy init */
-	for (i = 0; i < BLKCG_MAX_POLS; i++) {
-		struct blkcg_policy *pol = blkcg_policy[i];

+		/* invoke per-policy init */
 		if (blkcg_policy_enabled(blkg->q, pol))
 			pol->pd_init_fn(blkg);
 	}
@@ -245,10 +241,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);

 static void blkg_destroy(struct blkcg_gq *blkg)
 {
-	struct request_queue *q = blkg->q;
 	struct blkcg *blkcg = blkg->blkcg;

-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(blkg->q->queue_lock);
 	lockdep_assert_held(&blkcg->lock);

 	/* Something wrong if we are trying to remove same group twice */

+ 19 - 6
block/blk-core.c

@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
  */
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
+	int i;
+
 	while (true) {
 		bool drain = false;
-		int i;

 		spin_lock_irq(q->queue_lock);
 
 
@@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 			break;
 		msleep(10);
 	}
+
+	/*
+	 * With queue marked dead, any woken up waiter will fail the
+	 * allocation path, so the wakeup chaining is lost and we're
+	 * left with hung waiters. We need to wake up those waiters.
+	 */
+	if (q->request_fn) {
+		spin_lock_irq(q->queue_lock);
+		for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
+			wake_up_all(&q->rq.wait[i]);
+		spin_unlock_irq(q->queue_lock);
+	}
 }

 /**
@@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	/* mark @q DEAD, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
 	spin_lock_irq(lock);

 	/*
@@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
-
-	if (q->queue_lock != &q->__queue_lock)
-		q->queue_lock = &q->__queue_lock;
-
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
 
@@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
 	blk_sync_queue(q);

+	spin_lock_irq(lock);
+	if (q->queue_lock != &q->__queue_lock)
+		q->queue_lock = &q->__queue_lock;
+	spin_unlock_irq(lock);
+
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }

+ 0 - 41
block/blk-timeout.c

@@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
 		mod_timer(&q->timeout, expiry);
 }
 
 
-/**
- * blk_abort_queue -- Abort all request on given queue
- * @queue:	pointer to queue
- *
- */
-void blk_abort_queue(struct request_queue *q)
-{
-	unsigned long flags;
-	struct request *rq, *tmp;
-	LIST_HEAD(list);
-
-	/*
-	 * Not a request based block device, nothing to abort
-	 */
-	if (!q->request_fn)
-		return;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	elv_abort_queue(q);
-
-	/*
-	 * Splice entries to local list, to avoid deadlocking if entries
-	 * get readded to the timeout list by error handling
-	 */
-	list_splice_init(&q->timeout_list, &list);
-
-	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
-		blk_abort_request(rq);
-
-	/*
-	 * Occasionally, blk_abort_request() will return without
-	 * deleting the element from the list. Make sure we add those back
-	 * instead of leaving them on the local stack list.
-	 */
-	list_splice(&list, &q->timeout_list);
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-}
-EXPORT_SYMBOL_GPL(blk_abort_queue);

+ 18 - 12
block/cfq-iosched.c

@@ -17,8 +17,6 @@
 #include "blk.h"
 #include "blk-cgroup.h"

-static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
-
 /*
  * tunables
  */
@@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
 	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 }

-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
-	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 {
 	return pd_to_blkg(&cfqg->pd);
@@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

 #ifdef CONFIG_CFQ_GROUP_IOSCHED

+static struct blkcg_policy blkcg_policy_cfq;
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
 	return blkg_get(cfqg_to_blkg(cfqg));
@@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)

 	cfq_shutdown_timer_wq(cfqd);

-#ifndef CONFIG_CFQ_GROUP_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
 	kfree(cfqd->root_group);
 #endif
-	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
 	kfree(cfqd);
 }
 
 
@@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	if (!cfq_group_idle)
 		cfq_group_idle = 1;
-#else
-		cfq_group_idle = 0;
-#endif

 	ret = blkcg_policy_register(&blkcg_policy_cfq);
 	if (ret)
 		return ret;
+#else
+	cfq_group_idle = 0;
+#endif

+	ret = -ENOMEM;
 	cfq_pool = KMEM_CACHE(cfq_queue, 0);
 	if (!cfq_pool)
 		goto err_pol_unreg;
@@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
 err_free_pool:
 	kmem_cache_destroy(cfq_pool);
 err_pol_unreg:
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
 	blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
 	return ret;
 }

 static void __exit cfq_exit(void)
 {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
 	blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
 	elv_unregister(&iosched_cfq);
 	kmem_cache_destroy(cfq_pool);
 }

+ 4 - 1
block/scsi_ioctl.c

@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
 		break;
 	}

+	if (capable(CAP_SYS_RAWIO))
+		return 0;
+
 	/* In particular, rule out all resets and host-specific ioctls.  */
 	printk_ratelimited(KERN_WARNING
 			   "%s: sending ioctl %x to a partition!\n", current->comm, cmd);

-	return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+	return -ENOIOCTLCMD;
 }
 EXPORT_SYMBOL(scsi_verify_blk_ioctl);
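
Moving the capable() test above the printk changes behaviour in two ways: privileged callers return 0 earlier, and they no longer trigger the ratelimited warning that is only meant for unprivileged ioctls sent to a partition. A small sketch of the reordering, with a stand-in for the capability check (the capable() stub here is hypothetical, not the kernel API):

#include <stdio.h>

static int capable(void) { return 1; }   /* stand-in for CAP_SYS_RAWIO */

static int verify_ioctl(unsigned int cmd)
{
	if (capable())
		return 0;        /* privileged: allow, and skip the warning */

	fprintf(stderr, "sending ioctl %x to a partition!\n", cmd);
	return -1;               /* stand-in for -ENOIOCTLCMD */
}

int main(void) { return verify_ioctl(0x1234) ? 1 : 0; }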
 
 

+ 4 - 3
drivers/acpi/acpi_pad.c

@@ -36,6 +36,7 @@
 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
 static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);

 static unsigned long power_saving_mwait_eax;
 
 
@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
 	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
 		return;

-	mutex_lock(&isolated_cpus_lock);
+	mutex_lock(&round_robin_lock);
 	cpumask_clear(tmp);
 	for_each_cpu(cpu, pad_busy_cpus)
 		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
 	if (cpumask_empty(tmp))
 		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
 	if (cpumask_empty(tmp)) {
-		mutex_unlock(&isolated_cpus_lock);
+		mutex_unlock(&round_robin_lock);
 		return;
 	}
 	for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
 	tsk_in_cpu[tsk_index] = preferred_cpu;
 	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
 	cpu_weight[preferred_cpu]++;
-	mutex_unlock(&isolated_cpus_lock);
+	mutex_unlock(&round_robin_lock);

 	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
 }

+ 15 - 2
drivers/acpi/apei/apei-base.c

@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
 	u8 ins = entry->instruction;

 	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-		return acpi_os_map_generic_address(&entry->register_region);
+		return apei_map_generic_address(&entry->register_region);

 	return 0;
 }
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
 	u8 ins = entry->instruction;

 	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-		acpi_os_unmap_generic_address(&entry->register_region);
+		apei_unmap_generic_address(&entry->register_region);

 	return 0;
 }
@@ -606,6 +606,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
 	return 0;
 }

+int apei_map_generic_address(struct acpi_generic_address *reg)
+{
+	int rc;
+	u32 access_bit_width;
+	u64 address;
+
+	rc = apei_check_gar(reg, &address, &access_bit_width);
+	if (rc)
+		return rc;
+	return acpi_os_map_generic_address(reg);
+}
+EXPORT_SYMBOL_GPL(apei_map_generic_address);
+
 /* read GAR in interrupt (including NMI) or process context */
 int apei_read(u64 *val, struct acpi_generic_address *reg)
 {

+ 9 - 0
drivers/acpi/apei/apei-internal.h

@@ -7,6 +7,8 @@
 #define APEI_INTERNAL_H

 #include <linux/cper.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>

 struct apei_exec_context;
 
 
@@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
 /* IP has been set in instruction function */
 #define APEI_EXEC_SET_IP	1

+int apei_map_generic_address(struct acpi_generic_address *reg);
+
+static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
+{
+	acpi_os_unmap_generic_address(reg);
+}
+
 int apei_read(u64 *val, struct acpi_generic_address *reg);
 int apei_write(u64 val, struct acpi_generic_address *reg);
 
 

+ 3 - 3
drivers/acpi/apei/ghes.c

@@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 	if (!ghes)
 		return ERR_PTR(-ENOMEM);
 	ghes->generic = generic;
-	rc = acpi_os_map_generic_address(&generic->error_status_address);
+	rc = apei_map_generic_address(&generic->error_status_address);
 	if (rc)
 		goto err_free;
 	error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 	return ghes;

 err_unmap:
-	acpi_os_unmap_generic_address(&generic->error_status_address);
+	apei_unmap_generic_address(&generic->error_status_address);
 err_free:
 	kfree(ghes);
 	return ERR_PTR(rc);
@@ -330,7 +330,7 @@ err_free:
 static void ghes_fini(struct ghes *ghes)
 {
 	kfree(ghes->estatus);
-	acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+	apei_unmap_generic_address(&ghes->generic->error_status_address);
 }

 enum {

+ 30 - 2
drivers/acpi/processor_idle.c

@@ -224,6 +224,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 /*
  * Suspend / resume control
  */
+static int acpi_idle_suspend;
 static u32 saved_bm_rld;

 static void acpi_idle_bm_rld_save(void)
@@ -242,13 +243,21 @@ static void acpi_idle_bm_rld_restore(void)

 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
+	if (acpi_idle_suspend == 1)
+		return 0;
+
 	acpi_idle_bm_rld_save();
+	acpi_idle_suspend = 1;
 	return 0;
 }

 int acpi_processor_resume(struct acpi_device * device)
 {
+	if (acpi_idle_suspend == 0)
+		return 0;
+
 	acpi_idle_bm_rld_restore();
+	acpi_idle_suspend = 0;
 	return 0;
 }
 
 
@@ -754,6 +763,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,

 	local_irq_disable();

+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EBUSY;
+	}
+
 	lapic_timer_state_broadcast(pr, cx, 1);
 	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
@@ -823,6 +838,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,

 	local_irq_disable();

+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EBUSY;
+	}
+
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
@@ -907,14 +928,21 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 						drv, drv->safe_state_index);
 		} else {
 			local_irq_disable();
-			acpi_safe_halt();
+			if (!acpi_idle_suspend)
+				acpi_safe_halt();
 			local_irq_enable();
-			return -EINVAL;
+			return -EBUSY;
 		}
 	}

 	local_irq_disable();

+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EBUSY;
+	}
+
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*

+ 2 - 2
drivers/acpi/sysfs.c

@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
 {
 	int result = 0;

-	if (!strncmp(val, "enable", strlen("enable") - 1)) {
+	if (!strncmp(val, "enable", strlen("enable"))) {
 		result = acpi_debug_trace(trace_method_name, trace_debug_level,
 					  trace_debug_layer, 0);
 		if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
 		goto exit;
 	}

-	if (!strncmp(val, "disable", strlen("disable") - 1)) {
+	if (!strncmp(val, "disable", strlen("disable"))) {
 		int name = 0;
 		result = acpi_debug_trace((char *)&name, trace_debug_level,
 					  trace_debug_layer, 0);
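
The removed "- 1" made strncmp() compare one byte too few, so any value beginning with "enabl" or "disabl" was accepted as a match. A runnable demonstration of the off-by-one:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *val = "enablX";

	/* buggy: n = 5, compares only "enabl" -> reports a match (1) */
	printf("buggy: %d\n", !strncmp(val, "enable", strlen("enable") - 1));
	/* fixed: n = 6, compares the full token -> no match (0) */
	printf("fixed: %d\n", !strncmp(val, "enable", strlen("enable")));
	return 0;
}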

+ 2 - 0
drivers/acpi/video.c

@@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
 	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
 	struct acpi_object_list args = { 1, &arg0 };

+	if (!video->cap._DOS)
+		return 0;

 	if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
 		return -EINVAL;

+ 4 - 2
drivers/base/power/main.c

@@ -1031,7 +1031,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	dpm_wait_for_children(dev, async);

 	if (async_error)
-		return 0;
+		goto Complete;

 	pm_runtime_get_noresume(dev);
 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1040,7 +1040,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (pm_wakeup_pending()) {
 		pm_runtime_put_sync(dev);
 		async_error = -EBUSY;
-		return 0;
+		goto Complete;
 	}

 	device_lock(dev);
@@ -1097,6 +1097,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	}

 	device_unlock(dev);
+
+ Complete:
 	complete_all(&dev->power.completion);

 	if (error) {
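
The fix is the classic single-exit pattern: every early-out path now funnels through complete_all(), so async waiters blocked on dev->power.completion cannot hang when suspend bails out early. A self-contained sketch with a stubbed completion type (the struct and helper here are stand-ins, not the kernel API):

#include <stdio.h>

struct completion { int done; };
static void complete_all(struct completion *c) { c->done = 1; }

static int device_suspend(struct completion *c, int async_error)
{
	int error = 0;

	if (async_error)
		goto Complete;   /* was a bare "return 0" before the fix */

	/* ... actual suspend work would run here ... */

 Complete:
	complete_all(c);         /* waiters always get woken */
	return error;
}

int main(void)
{
	struct completion c = { 0 };
	device_suspend(&c, 1);
	printf("completion signalled: %d\n", c.done);
	return 0;
}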

+ 9 - 2
drivers/block/drbd/drbd_bitmap.c

@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 		first_word = 0;
 		spin_lock_irq(&b->bm_lock);
 	}
-
 	/* last page (respectively only page, for first page == last page) */
 	last_word = MLPP(el >> LN2_BPL);
-	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+	 * ==> e = 32767, el = 32768, last_page = 2,
+	 * and now last_word = 0.
+	 * We do not want to touch last_page in this case,
+	 * as we did not allocate it, it is not present in bitmap->bm_pages.
+	 */
+	if (last_word)
+		bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

 	/* possibly trailing bits.
 	 * example: (e & 63) == 63, el will be e+1.

+ 42 - 24
drivers/block/drbd/drbd_req.c

@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;

-		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+		if (req->rq_state & RQ_LOCAL_ABORTED) {
+			_req_may_be_done(req, m);
+			break;
+		}

 		__drbd_chk_io_error(mdev, false);

 	goto_queue_for_net_read:

+		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+
 		/* no point in retrying if there is no good remote data,
 		 * or we have no connection. */
 		if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
 	return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }

+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+	int congested = 0;
+
+	/* If I don't even have good local storage, we can not reasonably try
+	 * to pull ahead of the peer. We also need the local reference to make
+	 * sure mdev->act_log is there.
+	 * Note: caller has to make sure that net_conf is there.
+	 */
+	if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+		return;
+
+	if (mdev->net_conf->cong_fill &&
+	    atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+		dev_info(DEV, "Congestion-fill threshold reached\n");
+		congested = 1;
+	}
+
+	if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+		dev_info(DEV, "Congestion-extents threshold reached\n");
+		congested = 1;
+	}
+
+	if (congested) {
+		queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+		if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+			_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+		else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+	}
+	put_ldev(mdev);
+}
+
 static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
 	const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
 		_req_mod(req, queue_for_send_oos);

 	if (remote &&
-	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
-		int congested = 0;
-
-		if (mdev->net_conf->cong_fill &&
-		    atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
-			dev_info(DEV, "Congestion-fill threshold reached\n");
-			congested = 1;
-		}
-
-		if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
-			dev_info(DEV, "Congestion-extents threshold reached\n");
-			congested = 1;
-		}
-
-		if (congested) {
-			queue_barrier(mdev); /* last barrier, after mirrored writes */
-
-			if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
-				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
-			else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
-				_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
-		}
-	}
+	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+		maybe_pull_ahead(mdev);

 	spin_unlock_irq(&mdev->req_lock);
 	kfree(b); /* if someone else has beaten us to it... */

+ 1 - 0
drivers/block/floppy.c

@@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)

 	if (drive == current_reqD)
 		drive = current_drive;
+	__cancel_delayed_work(&fd_timeout);

 	if (drive < 0 || drive >= N_DRIVE) {
 		delay = 20UL * HZ;

+ 118 - 48
drivers/block/mtip32xx/mtip32xx.c

@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
+#include <linux/debugfs.h>
 #include "mtip32xx.h"

 #define HW_CMD_SLOT_SZ		(MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
  * allocated in mtip_init().
  */
 static int mtip_major;
+static struct dentry *dfs_parent;

 static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
 }

 /*
- * Sysfs register/status dump.
+ * Sysfs status dump.
  *
  * @dev  Pointer to the device structure, passed by the kernrel.
  * @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
  * return value
  *	The size, in bytes, of the data copied into buf.
  */
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
-	u32 group_allocated;
 	struct driver_data *dd = dev_to_disk(dev)->private_data;
 	int size = 0;
+
+	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+		size += sprintf(buf, "%s", "thermal_shutdown\n");
+	else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+		size += sprintf(buf, "%s", "write_protect\n");
+	else
+		size += sprintf(buf, "%s", "online\n");
+
+	return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+				  size_t len, loff_t *offset)
+{
+	struct driver_data *dd =  (struct driver_data *)f->private_data;
+	char buf[MTIP_DFS_MAX_BUF_SIZE];
+	u32 group_allocated;
+	int size = *offset;
 	int n;

-	size += sprintf(&buf[size], "Hardware\n--------\n");
-	size += sprintf(&buf[size], "S ACTive      : [ 0x");
+	if (!len || size)
+		return 0;
+
+	if (size < 0)
+		return -EINVAL;
+
+	size += sprintf(&buf[size], "H/ S ACTive      : [ 0x");

 	for (n = dd->slot_groups-1; n >= 0; n--)
 		size += sprintf(&buf[size], "%08X ",
 					 readl(dd->port->s_active[n]));

 	size += sprintf(&buf[size], "]\n");
-	size += sprintf(&buf[size], "Command Issue : [ 0x");
+	size += sprintf(&buf[size], "H/ Command Issue : [ 0x");

 	for (n = dd->slot_groups-1; n >= 0; n--)
 		size += sprintf(&buf[size], "%08X ",
 					readl(dd->port->cmd_issue[n]));

 	size += sprintf(&buf[size], "]\n");
-	size += sprintf(&buf[size], "Completed     : [ 0x");
+	size += sprintf(&buf[size], "H/ Completed     : [ 0x");

 	for (n = dd->slot_groups-1; n >= 0; n--)
 		size += sprintf(&buf[size], "%08X ",
 				readl(dd->port->completed[n]));

 	size += sprintf(&buf[size], "]\n");
-	size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+	size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
 				readl(dd->port->mmio + PORT_IRQ_STAT));
-	size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+	size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
 				readl(dd->mmio + HOST_IRQ_STAT));
 	size += sprintf(&buf[size], "\n");

-	size += sprintf(&buf[size], "Local\n-----\n");
-	size += sprintf(&buf[size], "Allocated    : [ 0x");
+	size += sprintf(&buf[size], "L/ Allocated     : [ 0x");

 	for (n = dd->slot_groups-1; n >= 0; n--) {
 		if (sizeof(long) > sizeof(u32))
@@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
 	}
 	size += sprintf(&buf[size], "]\n");

-	size += sprintf(&buf[size], "Commands in Q: [ 0x");
+	size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");

 	for (n = dd->slot_groups-1; n >= 0; n--) {
 		if (sizeof(long) > sizeof(u32))
@@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
 	}
 	size += sprintf(&buf[size], "]\n");

-	return size;
+	*offset = size <= len ? size : len;
+	size = copy_to_user(ubuf, buf, *offset);
+	if (size)
+		return -EFAULT;
+
+	return *offset;
 }

-static ssize_t mtip_hw_show_status(struct device *dev,
-				struct device_attribute *attr,
-				char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+				  size_t len, loff_t *offset)
 {
-	struct driver_data *dd = dev_to_disk(dev)->private_data;
-	int size = 0;
+	struct driver_data *dd =  (struct driver_data *)f->private_data;
+	char buf[MTIP_DFS_MAX_BUF_SIZE];
+	int size = *offset;

-	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
-		size += sprintf(buf, "%s", "thermal_shutdown\n");
-	else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
-		size += sprintf(buf, "%s", "write_protect\n");
-	else
-		size += sprintf(buf, "%s", "online\n");
-
-	return size;
-}
+	if (!len || size)
+		return 0;

-static ssize_t mtip_hw_show_flags(struct device *dev,
-				struct device_attribute *attr,
-				char *buf)
-{
-	struct driver_data *dd = dev_to_disk(dev)->private_data;
-	int size = 0;
+	if (size < 0)
+		return -EINVAL;

-	size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+	size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
 							dd->port->flags);
-	size += sprintf(&buf[size], "Flag in dd struct   : [ %08lX ]\n",
+	size += sprintf(&buf[size], "Flag-dd   : [ %08lX ]\n",
 							dd->dd_flag);

-	return size;
+	*offset = size <= len ? size : len;
+	size = copy_to_user(ubuf, buf, *offset);
+	if (size)
+		return -EFAULT;
+
+	return *offset;
 }

-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
+static const struct file_operations mtip_regs_fops = {
+	.owner  = THIS_MODULE,
+	.open   = simple_open,
+	.read   = mtip_hw_read_registers,
+	.llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+	.owner  = THIS_MODULE,
+	.open   = simple_open,
+	.read   = mtip_hw_read_flags,
+	.llseek = no_llseek,
+};

 /*
  * Create the sysfs related attributes.
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
 	if (!kobj || !dd)
 		return -EINVAL;

-	if (sysfs_create_file(kobj, &dev_attr_registers.attr))
-		dev_warn(&dd->pdev->dev,
-			"Error creating 'registers' sysfs entry\n");
 	if (sysfs_create_file(kobj, &dev_attr_status.attr))
 		dev_warn(&dd->pdev->dev,
 			"Error creating 'status' sysfs entry\n");
-	if (sysfs_create_file(kobj, &dev_attr_flags.attr))
-		dev_warn(&dd->pdev->dev,
-			"Error creating 'flags' sysfs entry\n");
 	return 0;
 }
 
 
@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
 	if (!kobj || !dd)
 		return -EINVAL;

-	sysfs_remove_file(kobj, &dev_attr_registers.attr);
 	sysfs_remove_file(kobj, &dev_attr_status.attr);
-	sysfs_remove_file(kobj, &dev_attr_flags.attr);

 	return 0;
 }

+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+	if (!dfs_parent)
+		return -1;
+
+	dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+	if (IS_ERR_OR_NULL(dd->dfs_node)) {
+		dev_warn(&dd->pdev->dev,
+			"Error creating node %s under debugfs\n",
+						dd->disk->disk_name);
+		dd->dfs_node = NULL;
+		return -1;
+	}
+
+	debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+							&mtip_flags_fops);
+	debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+							&mtip_regs_fops);
+
+	return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+	debugfs_remove_recursive(dd->dfs_node);
+}
+
+
 /*
  * Perform any init/resume time hardware setup
  *
@@ -3730,6 +3784,7 @@ skip_create_disk:
 		mtip_hw_sysfs_init(dd, kobj);
 		kobject_put(kobj);
 	}
+	mtip_hw_debugfs_init(dd);

 	if (dd->mtip_svc_handler) {
 		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3755,6 +3810,8 @@ start_service_thread:
 	return rv;

 kthread_run_error:
+	mtip_hw_debugfs_exit(dd);
+
 	/* Delete our gendisk. This also removes the device from /dev */
 	del_gendisk(dd->disk);
 
 
@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
 			kobject_put(kobj);
 		}
 	}
+	mtip_hw_debugfs_exit(dd);

 	/*
 	 * Delete our gendisk structure. This also removes the device
@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
 	}
 	mtip_major = error;

+	if (!dfs_parent) {
+		dfs_parent = debugfs_create_dir("rssd", NULL);
+		if (IS_ERR_OR_NULL(dfs_parent)) {
+			printk(KERN_WARNING "Error creating debugfs parent\n");
+			dfs_parent = NULL;
+		}
+	}
+
 	/* Register our PCI operations. */
 	error = pci_register_driver(&mtip_pci_driver);
-	if (error)
+	if (error) {
+		debugfs_remove(dfs_parent);
 		unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+	}

 	return error;
 }
@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
  */
 static void __exit mtip_exit(void)
 {
+	debugfs_remove_recursive(dfs_parent);
+
 	/* Release the allocated major block device number. */
 	unregister_blkdev(mtip_major, MTIP_DRV_NAME);
 
 

+ 4 - 1
drivers/block/mtip32xx/mtip32xx.h

@@ -26,7 +26,6 @@
 #include <linux/ata.h>
 #include <linux/interrupt.h>
 #include <linux/genhd.h>
-#include <linux/version.h>

 /* Offset of Subsystem Device ID in pci confoguration space */
 #define PCI_SUBSYSTEM_DEVICEID	0x2E
@@ -111,6 +110,8 @@
  #define dbg_printk(format, arg...)
 #endif

+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
 #define __force_bit2int (unsigned int __force)

 enum {
@@ -447,6 +448,8 @@ struct driver_data {
 	unsigned long dd_flag; /* NOTE: use atomic bit operations on this */

 	struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+	struct dentry *dfs_node;
 };

 #endif

+ 40 - 0
drivers/block/umem.c

@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
 	}
 }
 
 
+struct mm_plug_cb {
+	struct blk_plug_cb cb;
+	struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+	struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+	spin_lock_irq(&mmcb->card->lock);
+	activate(mmcb->card);
+	spin_unlock_irq(&mmcb->card->lock);
+	kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+	struct blk_plug *plug = current->plug;
+	struct mm_plug_cb *mmcb;
+
+	if (!plug)
+		return 0;
+
+	list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+		if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+			return 1;
+	}
+	/* Not currently on the callback list */
+	mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+	if (!mmcb)
+		return 0;
+
+	mmcb->card = card;
+	mmcb->cb.callback = mm_unplug;
+	list_add(&mmcb->cb.list, &plug->cb_list);
+	return 1;
+}
+
 static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
 	*card->biotail = bio;
 	bio->bi_next = NULL;
 	card->biotail = &bio->bi_next;
+	if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+		activate(card);
 	spin_unlock_irq(&card->lock);

 	return;
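
mm_check_plugged() walks the current task's plug callback list and only allocates and links a new callback when one for this card is not already queued; if the GFP_ATOMIC allocation fails it returns 0 so the caller activates the card immediately instead. A userspace sketch of that check-then-register idea over a plain singly linked list (the blk_plug machinery itself is not reproduced, and the names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct cb { void (*fn)(void); struct cb *next; };
static struct cb *plug_list;             /* stand-in for current->plug */

static void unplug(void) { puts("flushing queued work"); }

static int check_plugged(void)
{
	for (struct cb *c = plug_list; c; c = c->next)
		if (c->fn == unplug)
			return 1;        /* already registered: do nothing */

	struct cb *c = malloc(sizeof(*c)); /* GFP_ATOMIC in the driver */
	if (!c)
		return 0;                /* caller must act immediately */
	c->fn = unplug;
	c->next = plug_list;
	plug_list = c;
	return 1;
}

int main(void)
{
	printf("%d %d\n", check_plugged(), check_plugged()); /* one node only */
	for (struct cb *c = plug_list; c; c = c->next)
		c->fn();
	return 0;
}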

+ 2 - 0
drivers/block/xen-blkback/common.h

@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
 		break;
 	case BLKIF_OP_DISCARD:
 		dst->u.discard.flag = src->u.discard.flag;
+		dst->u.discard.id = src->u.discard.id;
 		dst->u.discard.sector_number = src->u.discard.sector_number;
 		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
 		break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
 		break;
 	case BLKIF_OP_DISCARD:
 		dst->u.discard.flag = src->u.discard.flag;
+		dst->u.discard.id = src->u.discard.id;
 		dst->u.discard.sector_number = src->u.discard.sector_number;
 		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
 		break;

+ 46 - 12
drivers/block/xen-blkfront.c

@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
 	return free;
 }

-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
 			       unsigned long id)
 {
+	if (info->shadow[id].req.u.rw.id != id)
+		return -EINVAL;
+	if (info->shadow[id].request == NULL)
+		return -EINVAL;
 	info->shadow[id].req.u.rw.id  = info->shadow_free;
 	info->shadow[id].request = NULL;
 	info->shadow_free = id;
+	return 0;
 }

+static const char *op_name(int op)
+{
+	static const char *const names[] = {
+		[BLKIF_OP_READ] = "read",
+		[BLKIF_OP_WRITE] = "write",
+		[BLKIF_OP_WRITE_BARRIER] = "barrier",
+		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+		[BLKIF_OP_DISCARD] = "discard" };
+
+	if (op < 0 || op >= ARRAY_SIZE(names))
+		return "unknown";
+
+	if (!names[op])
+		return "reserved";
+
+	return names[op];
+}
 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
 {
 	unsigned int end = minor + nr;
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)

 		bret = RING_GET_RESPONSE(&info->ring, i);
 		id   = bret->id;
+		/*
+		 * The backend has messed up and given us an id that we would
+		 * never have given to it (we stamp it up to BLK_RING_SIZE -
+		 * look in get_id_from_freelist.
+		 */
+		if (id >= BLK_RING_SIZE) {
+			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+			     info->gd->disk_name, op_name(bret->operation), id);
+			/* We can't safely get the 'struct request' as
+			 * the id is busted. */
+			continue;
+		}
 		req  = info->shadow[id].request;

 		if (bret->operation != BLKIF_OP_DISCARD)
 			blkif_completion(&info->shadow[id]);

-		add_id_to_freelist(info, id);
+		if (add_id_to_freelist(info, id)) {
+			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+			     info->gd->disk_name, op_name(bret->operation), id);
+			continue;
+		}

 		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
 		switch (bret->operation) {
 		case BLKIF_OP_DISCARD:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				struct request_queue *rq = info->rq;
-				printk(KERN_WARNING "blkfront: %s: discard op failed\n",
-					   info->gd->disk_name);
+				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+					   info->gd->disk_name, op_name(bret->operation));
 				error = -EOPNOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 		case BLKIF_OP_FLUSH_DISKCACHE:
 		case BLKIF_OP_WRITE_BARRIER:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-				printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
-				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-				       "barrier" :  "flush disk cache",
-				       info->gd->disk_name);
+				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+				       info->gd->disk_name, op_name(bret->operation));
 				error = -EOPNOTSUPP;
 			}
 			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
 				     info->shadow[id].req.u.rw.nr_segments == 0)) {
-				printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
-				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
-				       "barrier" :  "flush disk cache",
-				       info->gd->disk_name);
+				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+				       info->gd->disk_name, op_name(bret->operation));
 				error = -EOPNOTSUPP;
 			}
 			if (unlikely(error)) {
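
op_name() is a bounds-checked designated-initializer table: out-of-range opcodes map to "unknown" and holes left between the initializers map to "reserved", so a hostile backend value can never index past the array. A runnable sketch of the same shape with illustrative opcode numbers (not the blkif ABI values):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *op_name(int op)
{
	static const char *const names[] = {
		[0] = "read", [1] = "write", [2] = "barrier",
		[3] = "flush", [5] = "discard",   /* gap at 4 stays NULL */
	};

	if (op < 0 || op >= (int)ARRAY_SIZE(names))
		return "unknown";    /* out of range: never index the table */
	if (!names[op])
		return "reserved";   /* hole left by designated initializers */
	return names[op];
}

int main(void)
{
	printf("%s %s %s\n", op_name(5), op_name(4), op_name(99));
	return 0;
}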

+ 13 - 15
drivers/clk/clk.c

@@ -1067,26 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)

 	old_parent = clk->parent;

-	/* find index of new parent clock using cached parent ptrs */
-	if (clk->parents)
-		for (i = 0; i < clk->num_parents; i++)
-			if (clk->parents[i] == parent)
-				break;
-	else
+	if (!clk->parents)
 		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
 								GFP_KERNEL);

 	/*
-	 * find index of new parent clock using string name comparison
-	 * also try to cache the parent to avoid future calls to __clk_lookup
+	 * find index of new parent clock using cached parent ptrs,
+	 * or if not yet cached, use string name comparison and cache
+	 * them now to avoid future calls to __clk_lookup.
 	 */
-	if (i == clk->num_parents)
-		for (i = 0; i < clk->num_parents; i++)
-			if (!strcmp(clk->parent_names[i], parent->name)) {
-				if (clk->parents)
-					clk->parents[i] = __clk_lookup(parent->name);
-				break;
-			}
+	for (i = 0; i < clk->num_parents; i++) {
+		if (clk->parents && clk->parents[i] == parent)
+			break;
+		else if (!strcmp(clk->parent_names[i], parent->name)) {
+			if (clk->parents)
+				clk->parents[i] = __clk_lookup(parent->name);
+			break;
+		}
+	}

 	if (i == clk->num_parents) {
 		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
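
The two lookup passes collapse into a single loop: a cached parent pointer short-circuits the scan, otherwise the string comparison finds the index and fills the cache for next time. A userspace sketch of the merged loop, with stub clk types and a hypothetical lookup() standing in for __clk_lookup():

#include <stdio.h>
#include <string.h>

struct clk { const char *name; };

static struct clk *lookup(const char *name, struct clk *pool, int n)
{
	for (int i = 0; i < n; i++)
		if (!strcmp(pool[i].name, name))
			return &pool[i];
	return NULL;
}

int main(void)
{
	struct clk pool[] = { { "osc" }, { "pll" } };
	const char *parent_names[] = { "osc", "pll" };
	struct clk *parents[2] = { NULL, NULL };  /* cache, initially cold */
	struct clk *parent = &pool[1];
	int i;

	for (i = 0; i < 2; i++) {
		if (parents[i] == parent)
			break;                    /* cache hit */
		else if (!strcmp(parent_names[i], parent->name)) {
			parents[i] = lookup(parent->name, pool, 2); /* cache */
			break;
		}
	}
	printf("parent index %d, cached %s\n", i, parents[i]->name);
	return 0;
}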

+ 24 - 3
drivers/gpu/drm/drm_edid.c

@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
 	return true;
 }

+static bool valid_inferred_mode(const struct drm_connector *connector,
+				const struct drm_display_mode *mode)
+{
+	struct drm_display_mode *m;
+	bool ok = false;
+
+	list_for_each_entry(m, &connector->probed_modes, head) {
+		if (mode->hdisplay == m->hdisplay &&
+		    mode->vdisplay == m->vdisplay &&
+		    drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+			return false; /* duplicated */
+		if (mode->hdisplay <= m->hdisplay &&
+		    mode->vdisplay <= m->vdisplay)
+			ok = true;
+	}
+	return ok;
+}
+
 static int
 drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			struct detailed_timing *timing)
@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 	struct drm_device *dev = connector->dev;

 	for (i = 0; i < drm_num_dmt_modes; i++) {
-		if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+		if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+		    valid_inferred_mode(connector, drm_dmt_modes + i)) {
 			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
 			if (newmode) {
 				drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			return modes;

 		fixup_mode_1366x768(newmode);
-		if (!mode_in_range(newmode, edid, timing)) {
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
 			drm_mode_destroy(dev, newmode);
 			continue;
 		}
@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			return modes;

 		fixup_mode_1366x768(newmode);
-		if (!mode_in_range(newmode, edid, timing)) {
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
 			drm_mode_destroy(dev, newmode);
 			continue;
 		}
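
valid_inferred_mode() applies two filters to every mode inferred from the EDID range descriptor: reject it if it duplicates an already probed mode, and accept it only if it fits within at least one probed mode's resolution. The same logic over a plain array, with the refresh-rate comparison omitted for brevity:

#include <stdbool.h>
#include <stdio.h>

struct mode { int w, h; };

static bool valid_inferred(const struct mode *m,
			   const struct mode *probed, int n)
{
	bool ok = false;

	for (int i = 0; i < n; i++) {
		if (m->w == probed[i].w && m->h == probed[i].h)
			return false;   /* duplicate of a real mode */
		if (m->w <= probed[i].w && m->h <= probed[i].h)
			ok = true;      /* fits inside a real mode */
	}
	return ok;
}

int main(void)
{
	struct mode probed[] = { { 1920, 1080 } };
	struct mode small = { 1280, 720 }, big = { 2560, 1440 };

	printf("%d %d\n", valid_inferred(&small, probed, 1),
			  valid_inferred(&big, probed, 1));  /* prints 1 0 */
	return 0;
}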

+ 30 - 7
drivers/gpu/drm/i915/i915_dma.c

@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
 	}
 }

+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+	struct apertures_struct *ap;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
+	bool primary;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return;
+
+	ap->ranges[0].base = dev_priv->dev->agp->base;
+	ap->ranges[0].size =
+		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+	primary =
+		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+	kfree(ap);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto free_priv;
 	}

+	dev_priv->mm.gtt = intel_gtt_get();
+	if (!dev_priv->mm.gtt) {
+		DRM_ERROR("Failed to initialize GTT\n");
+		ret = -ENODEV;
+		goto put_bridge;
+	}
+
+	i915_kick_out_firmware_fb(dev_priv);
+
 	pci_set_master(dev->pdev);

 	/* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto put_bridge;
 	}

-	dev_priv->mm.gtt = intel_gtt_get();
-	if (!dev_priv->mm.gtt) {
-		DRM_ERROR("Failed to initialize GTT\n");
-		ret = -ENODEV;
-		goto out_rmmap;
-	}
-
 	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

 	dev_priv->mm.gtt_mapping =

+ 11 - 2
drivers/gpu/drm/radeon/radeon_gart.c

@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 	rdev->vm_manager.enabled = false;

 	/* mark first vm as always in use, it's the system one */
+	/* allocate enough for 2 full VM pts */
 	r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-				      rdev->vm_manager.max_pfn * 8,
+				      rdev->vm_manager.max_pfn * 8 * 2,
 				      RADEON_GEM_DOMAIN_VRAM);
 	if (r) {
 		dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -633,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
-	vm->last_pfn = 0;
+	/* SI requires equal sized PTs for all VMs, so always set
+	 * last_pfn to max_pfn.  cayman allows variable sized
+	 * pts so we can grow then as needed.  Once we switch
+	 * to two level pts we can unify this again.
+	 */
+	if (rdev->family >= CHIP_TAHITI)
+		vm->last_pfn = rdev->vm_manager.max_pfn;
+	else
+		vm->last_pfn = 0;
 	/* map the ib pool buffer at 0 in virtual address space, set
 	 * read only
 	 */

+ 6 - 4
drivers/gpu/drm/radeon/radeon_gem.c

@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_busy *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 	drm_gem_object_unreference_unlocked(gobj);
-	r = radeon_gem_handle_lockup(robj->rdev, r);
+	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }

 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *filp)
 {
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_wait_idle *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_wait(robj, NULL, false);
 	/* callback hw specific functions if any */
-	if (robj->rdev->asic->ioctl_wait_idle)
-		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+	if (rdev->asic->ioctl_wait_idle)
+		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
 	drm_gem_object_unreference_unlocked(gobj);
-	r = radeon_gem_handle_lockup(robj->rdev, r);
+	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }
 
 

+ 2 - 2
drivers/gpu/drm/radeon/si.c

@@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(0x15DC, 0);

 	/* empty context1-15 */
-	/* FIXME start with 1G, once using 2 level pt switch to full
+	/* FIXME start with 4G, once using 2 level pt switch to full
 	 * vm size space
 	 */
 	/* set vm size, must be a multiple of 4 */
 	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),

+ 2 - 2
drivers/hwmon/coretemp.c

@@ -693,7 +693,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
 	 * sensors. We check this bit only, all the early CPUs
 	 * without thermal sensors will be filtered out.
 	 */
-	if (!cpu_has(c, X86_FEATURE_DTS))
+	if (!cpu_has(c, X86_FEATURE_DTHERM))
 		return;

 	if (!pdev) {
@@ -794,7 +794,7 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
 };

 static const struct x86_cpu_id coretemp_ids[] = {
-	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);

+ 7 - 0
drivers/md/dm-thin.c

@@ -2292,6 +2292,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
 	if (r)
 		return r;

+	r = dm_pool_commit_metadata(pool->pmd);
+	if (r) {
+		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+		      __func__, r);
+		return r;
+	}
+
 	r = dm_pool_reserve_metadata_snap(pool->pmd);
 	if (r)
 		DMWARN("reserve_metadata_snap message failed.");

+ 5 - 3
drivers/md/md.c

@@ -5784,8 +5784,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
 			super_types[mddev->major_version].
 				validate_super(mddev, rdev);
 		if ((info->state & (1<<MD_DISK_SYNC)) &&
-		    (!test_bit(In_sync, &rdev->flags) ||
-		     rdev->raid_disk != info->raid_disk)) {
+		     rdev->raid_disk != info->raid_disk) {
 			/* This was a hot-add request, but events doesn't
 			 * match, so reject it.
 			 */
@@ -6751,7 +6750,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
 	thread->tsk = kthread_run(md_thread, thread,
 	thread->tsk = kthread_run(md_thread, thread,
 				  "%s_%s",
 				  "%s_%s",
 				  mdname(thread->mddev),
 				  mdname(thread->mddev),
-				  name ?: mddev->pers->name);
+				  name);
 	if (IS_ERR(thread->tsk)) {
 	if (IS_ERR(thread->tsk)) {
 		kfree(thread);
 		kfree(thread);
 		return NULL;
 		return NULL;
@@ -7298,6 +7297,7 @@ void md_do_sync(struct mddev *mddev)
 	int skipped = 0;
 	int skipped = 0;
 	struct md_rdev *rdev;
 	struct md_rdev *rdev;
 	char *desc;
 	char *desc;
+	struct blk_plug plug;
 
 
 	/* just incase thread restarts... */
 	/* just incase thread restarts... */
 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7447,7 @@ void md_do_sync(struct mddev *mddev)
 	}
 	}
 	mddev->curr_resync_completed = j;
 	mddev->curr_resync_completed = j;
 
 
+	blk_start_plug(&plug);
 	while (j < max_sectors) {
 	while (j < max_sectors) {
 		sector_t sectors;
 		sector_t sectors;
 
 
@@ -7552,6 +7553,7 @@ void md_do_sync(struct mddev *mddev)
 	 * this also signals 'finished resyncing' to md_stop
 	 * this also signals 'finished resyncing' to md_stop
 	 */
 	 */
  out:
  out:
+	blk_finish_plug(&plug);
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
 
 	/* tell personality that we are finished */
 	/* tell personality that we are finished */
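
Note: the blk_start_plug()/blk_finish_plug() pair added around the resync loop
batches the bios submitted on this task so they can be merged before reaching
the request queue. The API in outline (a minimal sketch; work_remaining and
submit_resync_io() are stand-ins for the per-personality sync_request() work):

	struct blk_plug plug;

	blk_start_plug(&plug);		/* bios now queue up on this task */
	while (work_remaining)
		submit_resync_io();	/* submissions accumulate and can merge */
	blk_finish_plug(&plug);		/* flush the whole batch at once */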

+ 2 - 1
drivers/md/multipath.c

@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
 	}

 	{
-		mddev->thread = md_register_thread(multipathd, mddev, NULL);
+		mddev->thread = md_register_thread(multipathd, mddev,
+						   "multipath");
 		if (!mddev->thread) {
 			printk(KERN_ERR "multipath: couldn't allocate thread"
 				" for %s\n", mdname(mddev));

+ 31 - 23
drivers/md/persistent-data/dm-space-map-checker.c

@@ -8,6 +8,7 @@

 #include <linux/device-mapper.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>

 #ifdef CONFIG_DM_DEBUG_SPACE_MAPS

@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)

 	ca->nr = nr_blocks;
 	ca->nr_free = nr_blocks;
-	ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
-	if (!ca->counts)
-		return -ENOMEM;
+
+	if (!nr_blocks)
+		ca->counts = NULL;
+	else {
+		ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+		if (!ca->counts)
+			return -ENOMEM;
+	}

 	return 0;
 }

+static void ca_destroy(struct count_array *ca)
+{
+	vfree(ca->counts);
+}
+
 static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 {
 	int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
 {
 	dm_block_t nr_blocks = ca->nr + extra_blocks;
-	uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+	uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
 	if (!counts)
 		return -ENOMEM;

-	memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
-	kfree(ca->counts);
+	if (ca->counts) {
+		memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+		ca_destroy(ca);
+	}
 	ca->nr = nr_blocks;
 	ca->nr_free += extra_blocks;
 	ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
 	return 0;
 }

-static void ca_destroy(struct count_array *ca)
-{
-	kfree(ca->counts);
-}
-
 /*----------------------------------------------------------------*/

 struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
 	int r;
 	struct sm_checker *smc;

-	if (!sm)
-		return NULL;
+	if (IS_ERR_OR_NULL(sm))
+		return ERR_PTR(-EINVAL);

 	smc = kmalloc(sizeof(*smc), GFP_KERNEL);
 	if (!smc)
-		return NULL;
+		return ERR_PTR(-ENOMEM);

 	memcpy(&smc->sm, &ops_, sizeof(smc->sm));
 	r = ca_create(&smc->old_counts, sm);
 	if (r) {
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}

 	r = ca_create(&smc->counts, sm);
 	if (r) {
 		ca_destroy(&smc->old_counts);
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}

 	smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
 		ca_destroy(&smc->counts);
 		ca_destroy(&smc->old_counts);
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}

 	r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
 		ca_destroy(&smc->counts);
 		ca_destroy(&smc->old_counts);
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}

 	return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
 	int r;
 	struct sm_checker *smc;

-	if (!sm)
-		return NULL;
+	if (IS_ERR_OR_NULL(sm))
+		return ERR_PTR(-EINVAL);

 	smc = kmalloc(sizeof(*smc), GFP_KERNEL);
 	if (!smc)
-		return NULL;
+		return ERR_PTR(-ENOMEM);

 	memcpy(&smc->sm, &ops_, sizeof(smc->sm));
 	r = ca_create(&smc->old_counts, sm);
 	if (r) {
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}

 	r = ca_create(&smc->counts, sm);
 	if (r) {
 		ca_destroy(&smc->old_counts);
 		kfree(smc);
-		return NULL;
+		return ERR_PTR(r);
 	}

 	smc->real_sm = sm;
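
Note: two conventions change together in this file. The count arrays move from
kzalloc() to vzalloc(), since one uint32_t per pool block can exceed the page
allocator's contiguous-order limit on large pools, and the constructors return
ERR_PTR() codes instead of bare NULL so callers can distinguish -ENOMEM from a
bad argument. Caller-side sketch (illustrative, not the kernel source):

	struct dm_space_map *smc = dm_sm_checker_create(sm);

	if (IS_ERR(smc))
		return PTR_ERR(smc);	/* propagate the real errno */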

+ 10 - 1
drivers/md/persistent-data/dm-space-map-disk.c

@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
 				       dm_block_t nr_blocks)
 {
 	struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
-	return dm_sm_checker_create_fresh(sm);
+	struct dm_space_map *smc;
+
+	if (IS_ERR_OR_NULL(sm))
+		return sm;
+
+	smc = dm_sm_checker_create_fresh(sm);
+	if (IS_ERR(smc))
+		dm_sm_destroy(sm);
+
+	return smc;
 }
 EXPORT_SYMBOL_GPL(dm_sm_disk_create);


+ 9 - 2
drivers/md/persistent-data/dm-transaction-manager.c

@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);

 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+	if (!tm->is_clone)
+		wipe_shadow_table(tm);
+
 	kfree(tm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
 		}

 		*sm = dm_sm_checker_create(inner);
-		if (!*sm)
+		if (IS_ERR(*sm)) {
+			r = PTR_ERR(*sm);
 			goto bad2;
+		}

 	} else {
 		r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
 		}

 		*sm = dm_sm_checker_create(inner);
-		if (!*sm)
+		if (IS_ERR(*sm)) {
+			r = PTR_ERR(*sm);
 			goto bad2;
+		}
 	}

 	return 0;

+ 5 - 8
drivers/md/raid1.c

@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		int bad_sectors;

 		int disk = start_disk + i;
-		if (disk >= conf->raid_disks)
-			disk -= conf->raid_disks;
+		if (disk >= conf->raid_disks * 2)
+			disk -= conf->raid_disks * 2;

 		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 		if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
 	struct md_rdev *blocked_rdev;
-	int plugged;
 	int first_clone;
 	int sectors_handled;
 	int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
 	 * the bad blocks.  Each set of writes gets it's own r1bio
 	 * with a set of bios attached.
 	 */
-	plugged = mddev_check_plugged(mddev);

 	disks = conf->raid_disks * 2;
  retry_write:
@@ -1191,6 +1189,8 @@ read_again:
 		bio_list_add(&conf->pending_bio_list, mbio);
 		conf->pending_count++;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
+		if (!mddev_check_plugged(mddev))
+			md_wakeup_thread(mddev->thread);
 	}
 	/* Mustn't call r1_bio_write_done before this next test,
 	 * as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:

 	/* In case raid1d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
-
-	if (do_sync || !bitmap || !plugged)
-		md_wakeup_thread(mddev->thread);
 }

 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2621,7 +2618,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 		goto abort;
 	}
 	err = -ENOMEM;
-	conf->thread = md_register_thread(raid1d, mddev, NULL);
+	conf->thread = md_register_thread(raid1d, mddev, "raid1");
 	if (!conf->thread) {
 		printk(KERN_ERR
 		       "md/raid1:%s: couldn't allocate thread\n",

+ 16 - 10
drivers/md/raid10.c

@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
 	unsigned long flags;
 	struct md_rdev *blocked_rdev;
-	int plugged;
 	int sectors_handled;
 	int max_sectors;
 	int sectors;
@@ -1239,7 +1238,6 @@ read_again:
 	 * of r10_bios is recored in bio->bi_phys_segments just as with
 	 * the read case.
 	 */
-	plugged = mddev_check_plugged(mddev);

 	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
 	raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
 		bio_list_add(&conf->pending_bio_list, mbio);
 		conf->pending_count++;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
+		if (!mddev_check_plugged(mddev))
+			md_wakeup_thread(mddev->thread);

 		if (!r10_bio->devs[i].repl_bio)
 			continue;
@@ -1423,6 +1423,8 @@ retry_write:
 		bio_list_add(&conf->pending_bio_list, mbio);
 		conf->pending_count++;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
+		if (!mddev_check_plugged(mddev))
+			md_wakeup_thread(mddev->thread);
 	}

 	/* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:

 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
-
-	if (do_sync || !mddev->bitmap || !plugged)
-		md_wakeup_thread(mddev->thread);
 }

 static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 			if (r10_sync_page_io(rdev,
 					     r10_bio->devs[sl].addr +
 					     sect,
-					     s<<9, conf->tmppage, WRITE)
+					     s, conf->tmppage, WRITE)
 			    == 0) {
 				/* Well, this device is dead */
 				printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 			switch (r10_sync_page_io(rdev,
 					     r10_bio->devs[sl].addr +
 					     sect,
-					     s<<9, conf->tmppage,
+					     s, conf->tmppage,
 						 READ)) {
 			case 0:
 				/* Well, this device is dead */
@@ -2512,7 +2511,7 @@ read_more:
 	slot = r10_bio->read_slot;
 	printk_ratelimited(
 		KERN_ERR
-		"md/raid10:%s: %s: redirecting"
+		"md/raid10:%s: %s: redirecting "
 		"sector %llu to another mirror\n",
 		mdname(mddev),
 		bdevname(rdev->bdev, b),
@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
 	blk_start_plug(&plug);
 	for (;;) {

-		flush_pending_writes(conf);
+		if (atomic_read(&mddev->plug_cnt) == 0)
+			flush_pending_writes(conf);

 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {
@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			/* want to reconstruct this device */
 			rb2 = r10_bio;
 			sect = raid10_find_virt(conf, sector_nr, i);
+			if (sect >= mddev->resync_max_sectors) {
+				/* last stripe is not complete - don't
+				 * try to recover this sector.
+				 */
+				continue;
+			}
 			/* Unless we are doing a full sync, or a replacement
 			 * we only need to recover the block if it is set in
 			 * the bitmap
@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	spin_lock_init(&conf->resync_lock);
 	init_waitqueue_head(&conf->wait_barrier);

-	conf->thread = md_register_thread(raid10d, mddev, NULL);
+	conf->thread = md_register_thread(raid10d, mddev, "raid10");
 	if (!conf->thread)
 		goto out;


+ 47 - 20
drivers/md/raid5.c

@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
 		BUG_ON(!list_empty(&sh->lru));
 		BUG_ON(atomic_read(&conf->active_stripes)==0);
 		if (test_bit(STRIPE_HANDLE, &sh->state)) {
-			if (test_bit(STRIPE_DELAYED, &sh->state))
+			if (test_bit(STRIPE_DELAYED, &sh->state) &&
+			    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				list_add_tail(&sh->lru, &conf->delayed_list);
 			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 				   sh->bm_seq - conf->seq_write > 0)
 				list_add_tail(&sh->lru, &conf->bitmap_list);
 			else {
+				clear_bit(STRIPE_DELAYED, &sh->state);
 				clear_bit(STRIPE_BIT_DELAY, &sh->state);
 				list_add_tail(&sh->lru, &conf->handle_list);
 			}
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 					 * a chance*/
 					md_check_recovery(conf->mddev);
 				}
+				/*
+				 * Because md_wait_for_blocked_rdev
+				 * will dec nr_pending, we must
+				 * increment it first.
+				 */
+				atomic_inc(&rdev->nr_pending);
 				md_wait_for_blocked_rdev(rdev, conf->mddev);
 			} else {
 				/* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
 	} else {
 		const char *bdn = bdevname(rdev->bdev, b);
 		int retry = 0;
+		int set_bad = 0;

 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
 		atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
-		else if (conf->mddev->degraded >= conf->max_degraded)
+		else if (conf->mddev->degraded >= conf->max_degraded) {
+			set_bad = 1;
 			printk_ratelimited(
 				KERN_WARNING
 				"md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
-		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
 			/* Oh, no!!! */
+			set_bad = 1;
 			printk_ratelimited(
 				KERN_WARNING
 				"md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
-		else if (atomic_read(&rdev->read_errors)
+		} else if (atomic_read(&rdev->read_errors)
 			 > conf->max_nr_stripes)
 			printk(KERN_WARNING
 			       "md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
 		else {
 			clear_bit(R5_ReadError, &sh->dev[i].flags);
 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
-			md_error(conf->mddev, rdev);
+			if (!(set_bad
+			      && test_bit(In_sync, &rdev->flags)
+			      && rdev_set_badblocks(
+				      rdev, sh->sector, STRIPE_SECTORS, 0)))
+				md_error(conf->mddev, rdev);
 		}
 	}
 	rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)

 finish:
 	/* wait for this device to become unblocked */
-	if (conf->mddev->external && unlikely(s.blocked_rdev))
-		md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+	if (unlikely(s.blocked_rdev)) {
+		if (conf->mddev->external)
+			md_wait_for_blocked_rdev(s.blocked_rdev,
+						 conf->mddev);
+		else
+			/* Internal metadata will immediately
+			 * be written by raid5d, so we don't
+			 * need to wait here.
+			 */
+			rdev_dec_pending(s.blocked_rdev,
+					 conf->mddev);
+	}

 	if (s.handle_bad_blocks)
 		for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		raid_bio->bi_next = (void*)rdev;
 		align_bi->bi_bdev =  rdev->bdev;
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-		/* No reshape active, so we can trust rdev->data_offset */
-		align_bi->bi_sector += rdev->data_offset;

 		if (!bio_fits_rdev(align_bi) ||
 		    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 			return 0;
 		}

+		/* No reshape active, so we can trust rdev->data_offset */
+		align_bi->bi_sector += rdev->data_offset;
+
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	struct stripe_head *sh;
 	const int rw = bio_data_dir(bi);
 	int remaining;
-	int plugged;

 	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

-	plugged = mddev_check_plugged(mddev);
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
 		DEFINE_WAIT(w);
 		int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 			if ((bi->bi_rw & REQ_SYNC) &&
 			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				atomic_inc(&conf->preread_active_stripes);
+			mddev_check_plugged(mddev);
 			release_stripe(sh);
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 			finish_wait(&conf->wait_for_overlap, &w);
 			break;
 		}
-			
 	}
-	if (!plugged)
-		md_wakeup_thread(mddev->thread);

 	spin_lock_irq(&conf->device_lock);
 	remaining = raid5_dec_bi_phys_segments(bi);
@@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 	int raid_disk, memory, max_disks;
 	struct md_rdev *rdev;
 	struct disk_info *disk;
+	char pers_name[6];

 	if (mddev->new_level != 5
 	    && mddev->new_level != 4
@@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
 		       mdname(mddev), memory);

-	conf->thread = md_register_thread(raid5d, mddev, NULL);
+	sprintf(pers_name, "raid%d", mddev->new_level);
+	conf->thread = md_register_thread(raid5d, mddev, pers_name);
 	if (!conf->thread) {
 		printk(KERN_ERR
 		       "md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	if (rdev->saved_raid_disk >= 0 &&
 	    rdev->saved_raid_disk >= first &&
 	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
-		disk = rdev->saved_raid_disk;
-	else
-		disk = first;
-	for ( ; disk <= last ; disk++) {
+		first = rdev->saved_raid_disk;
+
+	for (disk = first; disk <= last; disk++) {
 		p = conf->disks + disk;
 		if (p->rdev == NULL) {
 			clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			if (rdev->saved_raid_disk != disk)
 				conf->fullsync = 1;
 			rcu_assign_pointer(p->rdev, rdev);
-			break;
+			goto out;
 		}
+	}
+	for (disk = first; disk <= last; disk++) {
+		p = conf->disks + disk;
 		if (test_bit(WantReplacement, &p->rdev->flags) &&
 		    p->replacement == NULL) {
 			clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			break;
 		}
 	}
+out:
 	print_raid5_conf(conf);
 	return err;
 }
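
Note: the ops_run_io() hunk restores the nr_pending balance: as its new comment
says, md_wait_for_blocked_rdev() decrements nr_pending when it returns, so a
path that has not taken its own reference must increment first. In outline:

	atomic_inc(&rdev->nr_pending);			/* reference the wait will drop */
	md_wait_for_blocked_rdev(rdev, conf->mddev);	/* sleeps, then decrements */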

+ 1 - 0
drivers/net/ethernet/intel/e1000e/defines.h

@@ -103,6 +103,7 @@
 #define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
 #define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
 #define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
 #define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */


+ 14 - 61
drivers/net/ethernet/intel/e1000e/netdev.c

@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
  * @sk_buff: socket buffer with received data
  **/
 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
-			      __le16 csum, struct sk_buff *skb)
+			      struct sk_buff *skb)
 {
 	u16 status = (u16)status_err;
 	u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 	if (status & E1000_RXD_STAT_IXSM)
 		return;

-	/* TCP/UDP checksum error bit is set */
-	if (errors & E1000_RXD_ERR_TCPE) {
+	/* TCP/UDP checksum error bit or IP checksum error bit is set */
+	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
 		/* let the stack verify checksum errors */
 		adapter->hw_csum_err++;
 		return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 		return;

 	/* It must be a TCP or UDP packet with a valid checksum */
-	if (status & E1000_RXD_STAT_TCPCS) {
-		/* TCP checksum is good */
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else {
-		/*
-		 * IP fragment with UDP payload
-		 * Hardware complements the payload checksum, so we undo it
-		 * and then put the value in host order for further stack use.
-		 */
-		__sum16 sum = (__force __sum16)swab16((__force u16)csum);
-		skb->csum = csum_unfold(~sum);
-		skb->ip_summed = CHECKSUM_COMPLETE;
-	}
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	adapter->hw_csum_good++;
 }

@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 		skb_put(skb, length);

 		/* Receive Checksum Offload */
-		e1000_rx_checksum(adapter, staterr,
-				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+		e1000_rx_checksum(adapter, staterr, skb);

 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

@@ -1341,8 +1328,7 @@ copydone:
 		total_rx_bytes += skb->len;
 		total_rx_packets++;

-		e1000_rx_checksum(adapter, staterr,
-				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+		e1000_rx_checksum(adapter, staterr, skb);

 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 			}
 		}

-		/* Receive Checksum Offload XXX recompute due to CRC strip? */
-		e1000_rx_checksum(adapter, staterr,
-				  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter, staterr, skb);

 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)

 	/* Enable Receive Checksum Offload for TCP and UDP */
 	rxcsum = er32(RXCSUM);
-	if (adapter->netdev->features & NETIF_F_RXCSUM) {
+	if (adapter->netdev->features & NETIF_F_RXCSUM)
 		rxcsum |= E1000_RXCSUM_TUOFL;
-
-		/*
-		 * IPv4 payload checksum for UDP fragments must be
-		 * used in conjunction with packet-split.
-		 */
-		if (adapter->rx_ps_pages)
-			rxcsum |= E1000_RXCSUM_IPPCSE;
-	} else {
+	else
 		rxcsum &= ~E1000_RXCSUM_TUOFL;
-		/* no need to clear IPPCSE as it defaults to 0 */
-	}
 	ew32(RXCSUM, rxcsum);

 	if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

 	/* Jumbo frame support */
-	if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
-		if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
-			e_err("Jumbo Frames not supported.\n");
-			return -EINVAL;
-		}
-
-		/*
-		 * IP payload checksum (enabled with jumbos/packet-split when
-		 * Rx checksum is enabled) and generation of RSS hash is
-		 * mutually exclusive in the hardware.
-		 */
-		if ((netdev->features & NETIF_F_RXCSUM) &&
-		    (netdev->features & NETIF_F_RXHASH)) {
-			e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled.  Disable one of the receive offload features before enabling jumbos.\n");
-			return -EINVAL;
-		}
+	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+		e_err("Jumbo Frames not supported.\n");
+		return -EINVAL;
 	}

 	/* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
 			 NETIF_F_RXALL)))
 		return 0;

-	/*
-	 * IP payload checksum (enabled with jumbos/packet-split when Rx
-	 * checksum is enabled) and generation of RSS hash is mutually
-	 * exclusive in the hardware.
-	 */
-	if (adapter->rx_ps_pages &&
-	    (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
-		e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames.  Disable jumbos or enable only one of the receive offload features.\n");
-		return -EINVAL;
-	}
-
 	if (changed & NETIF_F_RXFCS) {
 		if (features & NETIF_F_RXFCS) {
 			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
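
Note: the descriptor dword read here is a union - the same bits hold either the
packet-split checksum (csum_ip.csum) or the RSS hash (rss), as the adjacent
e1000_rx_checksum()/e1000_rx_hash() calls show - so the raw csum value cannot
be trusted alongside RSS. The rx path therefore drops the CHECKSUM_COMPLETE
reconstruction (and the IPPCSE/jumbo restrictions with it) and relies on the
hardware's pass/fail bits alone. The resulting policy, in outline:

	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		adapter->hw_csum_err++;		/* let the stack re-verify */
		return;
	}
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hardware validated it */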

+ 18 - 11
drivers/net/ethernet/intel/igbvf/ethtool.c

@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
 	struct igbvf_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;

-	if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
-	    ((ec->rx_coalesce_usecs > 3) &&
-	     (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
-	    (ec->rx_coalesce_usecs == 2))
-		return -EINVAL;
-
-	/* convert to rate of irq's per second */
-	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+	if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+	     (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+		adapter->current_itr = ec->rx_coalesce_usecs << 2;
+		adapter->requested_itr = 1000000000 /
+					(adapter->current_itr * 256);
+	} else if ((ec->rx_coalesce_usecs == 3) ||
+		   (ec->rx_coalesce_usecs == 2)) {
 		adapter->current_itr = IGBVF_START_ITR;
 		adapter->requested_itr = ec->rx_coalesce_usecs;
-	} else {
-		adapter->current_itr = ec->rx_coalesce_usecs << 2;
+	} else if (ec->rx_coalesce_usecs == 0) {
+		/*
+		 * The user's desire is to turn off interrupt throttling
+		 * altogether, but due to HW limitations, we can't do that.
+		 * Instead we set a very small value in EITR, which would
+		 * allow ~967k interrupts per second, but allow the adapter's
+		 * internal clocking to still function properly.
+		 */
+		adapter->current_itr = 4;
 		adapter->requested_itr = 1000000000 /
 					(adapter->current_itr * 256);
-	}
+	} else
+		return -EINVAL;

 	writel(adapter->current_itr,
 	       hw->hw_addr + adapter->rx_ring->itr_register);
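
Note: the conversion math above: rx_coalesce_usecs << 2 expresses the interval
in the EITR register's ~256 ns units, and 1000000000 / (current_itr * 256)
turns that interval back into an approximate interrupt rate. Worked example
(values follow the formulas in the hunk):

	/* ec->rx_coalesce_usecs = 100:
	 *   current_itr   = 100 << 2           = 400 register units
	 *   requested_itr = 10^9 / (400 * 256) = ~9765 interrupts/sec
	 */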

+ 1 - 1
drivers/oprofile/oprofile_perf.c

@@ -25,7 +25,7 @@ static int oprofile_perf_enabled;
 static DEFINE_MUTEX(oprofile_perf_mutex);

 static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static struct perf_event **perf_events[NR_CPUS];
 static int num_counters;

 /*
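
Note: nr_cpumask_bits is only a compile-time constant when
CONFIG_CPUMASK_OFFSTACK is disabled; with offstack cpumasks it aliases the
variable nr_cpu_ids, which cannot size a static array. NR_CPUS is always a
constant, hence:

	static struct perf_event **perf_events[NR_CPUS];	/* constant bound */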

+ 20 - 15
fs/splice.c

@@ -273,13 +273,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
  * Check if we need to grow the arrays holding pages and partial page
  * descriptions.
  */
-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
 {
-	if (pipe->buffers <= PIPE_DEF_BUFFERS)
+	unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+
+	spd->nr_pages_max = buffers;
+	if (buffers <= PIPE_DEF_BUFFERS)
 		return 0;

-	spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
-	spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
+	spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
+	spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);

 	if (spd->pages && spd->partial)
 		return 0;
@@ -289,10 +292,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
 	return -ENOMEM;
 }

-void splice_shrink_spd(struct pipe_inode_info *pipe,
-		       struct splice_pipe_desc *spd)
+void splice_shrink_spd(struct splice_pipe_desc *spd)
 {
-	if (pipe->buffers <= PIPE_DEF_BUFFERS)
+	if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
 		return;

 	kfree(spd->pages);
@@ -315,6 +317,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
+		.nr_pages_max = PIPE_DEF_BUFFERS,
 		.flags = flags,
 		.ops = &page_cache_pipe_buf_ops,
 		.spd_release = spd_release_page,
@@ -326,7 +329,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	loff = *ppos & ~PAGE_CACHE_MASK;
 	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	nr_pages = min(req_pages, pipe->buffers);
+	nr_pages = min(req_pages, spd.nr_pages_max);

 	/*
 	 * Lookup the (hopefully) full range of pages we need.
@@ -497,7 +500,7 @@ fill_it:
 	if (spd.nr_pages)
 		error = splice_to_pipe(pipe, &spd);

-	splice_shrink_spd(pipe, &spd);
+	splice_shrink_spd(&spd);
 	return error;
 }

@@ -598,6 +601,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
+		.nr_pages_max = PIPE_DEF_BUFFERS,
 		.flags = flags,
 		.ops = &default_pipe_buf_ops,
 		.spd_release = spd_release_page,
@@ -608,8 +612,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,

 	res = -ENOMEM;
 	vec = __vec;
-	if (pipe->buffers > PIPE_DEF_BUFFERS) {
-		vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
+	if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
+		vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
 		if (!vec)
 			goto shrink_ret;
 	}
@@ -617,7 +621,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 	offset = *ppos & ~PAGE_CACHE_MASK;
 	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

-	for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
+	for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
 		struct page *page;

 		page = alloc_page(GFP_USER);
@@ -665,7 +669,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 shrink_ret:
 	if (vec != __vec)
 		kfree(vec);
-	splice_shrink_spd(pipe, &spd);
+	splice_shrink_spd(&spd);
 	return res;

 err:
@@ -1614,6 +1618,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
+		.nr_pages_max = PIPE_DEF_BUFFERS,
 		.flags = flags,
 		.ops = &user_page_pipe_buf_ops,
 		.spd_release = spd_release_page,
@@ -1629,13 +1634,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,

 	spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
 					    spd.partial, false,
-					    pipe->buffers);
+					    spd.nr_pages_max);
 	if (spd.nr_pages <= 0)
 		ret = spd.nr_pages;
 	else
 		ret = splice_to_pipe(pipe, &spd);

-	splice_shrink_spd(pipe, &spd);
+	splice_shrink_spd(&spd);
 	return ret;
 }

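
Note: pipe->buffers can change underneath these functions (a concurrent pipe
resize), so grow and shrink could disagree about the array sizes. The fix
reads it once with ACCESS_ONCE() and carries the snapshot in spd.nr_pages_max,
which every later loop and the final shrink use instead of re-reading the
pipe. Usage in outline (minimal sketch):

	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,	/* grow may raise this */
	};

	if (splice_grow_spd(pipe, &spd))	/* snapshots pipe->buffers once */
		return -ENOMEM;
	/* ... index only up to spd.nr_pages_max from here on ... */
	splice_shrink_spd(&spd);		/* frees by the same snapshot */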

+ 0 - 1
include/linux/blkdev.h

@@ -827,7 +827,6 @@ extern bool __blk_end_request_err(struct request *rq, int error);
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
-extern void blk_abort_queue(struct request_queue *);
 extern void blk_unprep_request(struct request *);

 /*

+ 0 - 2
include/linux/irq.h

@@ -301,8 +301,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
  * @irq_pm_shutdown:	function called from core code on shutdown once per chip
  * @irq_print_chip:	optional to print special chip info in show_interrupts
  * @flags:		chip specific flags
- *
- * @release:		release function solely used by UML
  */
 struct irq_chip {
 	const char	*name;

+ 4 - 4
include/linux/splice.h

@@ -51,7 +51,8 @@ struct partial_page {
 struct splice_pipe_desc {
 	struct page **pages;		/* page map */
 	struct partial_page *partial;	/* pages[] may not be contig */
-	int nr_pages;			/* number of pages in map */
+	int nr_pages;			/* number of populated pages in map */
+	unsigned int nr_pages_max;	/* pages[] & partial[] arrays size */
 	unsigned int flags;		/* splice flags */
 	const struct pipe_buf_operations *ops;/* ops associated with output pipe */
 	void (*spd_release)(struct splice_pipe_desc *, unsigned int);
@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 /*
  * for dynamic pipe sizing
  */
-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
-extern void splice_shrink_spd(struct pipe_inode_info *,
-				struct splice_pipe_desc *);
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);

 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;

+ 4 - 0
include/net/sctp/structs.h

@@ -912,6 +912,9 @@ struct sctp_transport {
 		/* Is this structure kfree()able? */
 		malloced:1;

+	/* Has this transport moved the ctsn since we last sacked */
+	__u32 sack_generation;
+
 	struct flowi fl;

 	/* This is the peer's IP address and port. */
@@ -1584,6 +1587,7 @@ struct sctp_association {
 		 */
 		__u8    sack_needed;     /* Do we need to sack the peer? */
 		__u32	sack_cnt;
+		__u32	sack_generation;

 		/* These are capabilities which our peer advertised.  */
 		__u8	ecn_capable:1,	    /* Can peer do ECN? */

+ 2 - 1
include/net/sctp/tsnmap.h

@@ -117,7 +117,8 @@ void sctp_tsnmap_free(struct sctp_tsnmap *map);
 int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);

 /* Mark this TSN as seen.  */
-int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+		     struct sctp_transport *trans);

 /* Mark this TSN and all lower as seen. */
 void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);

+ 214 - 85
kernel/printk.c

@@ -193,12 +193,19 @@ static int console_may_schedule;
  * separated by ',', and find the message after the ';' character.
  */

+enum log_flags {
+	LOG_DEFAULT = 0,
+	LOG_NOCONS = 1,		/* already flushed, do not print to console */
+};
+
 struct log {
 	u64 ts_nsec;		/* timestamp in nanoseconds */
 	u16 len;		/* length of entire record */
 	u16 text_len;		/* length of text buffer */
 	u16 dict_len;		/* length of dictionary buffer */
-	u16 level;		/* syslog level + facility */
+	u8 facility;		/* syslog facility */
+	u8 flags:5;		/* internal record flags */
+	u8 level:3;		/* syslog level */
 };

 /*
@@ -286,6 +293,7 @@ static u32 log_next(u32 idx)

 /* insert record into the buffer, discard old ones, update heads */
 static void log_store(int facility, int level,
+		      enum log_flags flags, u64 ts_nsec,
 		      const char *dict, u16 dict_len,
 		      const char *text, u16 text_len)
 {
@@ -329,8 +337,13 @@ static void log_store(int facility, int level,
 	msg->text_len = text_len;
 	memcpy(log_dict(msg), dict, dict_len);
 	msg->dict_len = dict_len;
-	msg->level = (facility << 3) | (level & 7);
-	msg->ts_nsec = local_clock();
+	msg->facility = facility;
+	msg->level = level & 7;
+	msg->flags = flags & 0x1f;
+	if (ts_nsec > 0)
+		msg->ts_nsec = ts_nsec;
+	else
+		msg->ts_nsec = local_clock();
 	memset(log_dict(msg) + dict_len, 0, pad_len);
 	msg->len = sizeof(struct log) + text_len + dict_len + pad_len;

@@ -446,7 +459,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 	ts_usec = msg->ts_nsec;
 	do_div(ts_usec, 1000);
 	len = sprintf(user->buf, "%u,%llu,%llu;",
-		      msg->level, user->seq, ts_usec);
+		      (msg->facility << 3) | msg->level, user->seq, ts_usec);

 	/* escape non-printable characters */
 	for (i = 0; i < msg->text_len; i++) {
@@ -787,6 +800,21 @@ static bool printk_time;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);

+static size_t print_time(u64 ts, char *buf)
+{
+	unsigned long rem_nsec;
+
+	if (!printk_time)
+		return 0;
+
+	if (!buf)
+		return 15;
+
+	rem_nsec = do_div(ts, 1000000000);
+	return sprintf(buf, "[%5lu.%06lu] ",
+		       (unsigned long)ts, rem_nsec / 1000);
+}
+
 static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
 {
 	size_t len = 0;
@@ -803,18 +831,7 @@ static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
 		}
 	}

-	if (printk_time) {
-		if (buf) {
-			unsigned long long ts = msg->ts_nsec;
-			unsigned long rem_nsec = do_div(ts, 1000000000);
-
-			len += sprintf(buf + len, "[%5lu.%06lu] ",
-					 (unsigned long) ts, rem_nsec / 1000);
-		} else {
-			len += 15;
-		}
-	}
-
+	len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
 	return len;
 }

@@ -862,28 +879,49 @@ static int syslog_print(char __user *buf, int size)
 {
 	char *text;
 	struct log *msg;
-	int len;
+	int len = 0;

 	text = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
 	if (!text)
 		return -ENOMEM;

-	raw_spin_lock_irq(&logbuf_lock);
-	if (syslog_seq < log_first_seq) {
-		/* messages are gone, move to first one */
-		syslog_seq = log_first_seq;
-		syslog_idx = log_first_idx;
-	}
-	msg = log_from_idx(syslog_idx);
-	len = msg_print_text(msg, true, text, LOG_LINE_MAX);
-	syslog_idx = log_next(syslog_idx);
-	syslog_seq++;
-	raw_spin_unlock_irq(&logbuf_lock);
+	while (size > 0) {
+		size_t n;

-	if (len > size)
-		len = -EINVAL;
-	else if (len > 0 && copy_to_user(buf, text, len))
-		len = -EFAULT;
+		raw_spin_lock_irq(&logbuf_lock);
+		if (syslog_seq < log_first_seq) {
+			/* messages are gone, move to first one */
+			syslog_seq = log_first_seq;
+			syslog_idx = log_first_idx;
+		}
+		if (syslog_seq == log_next_seq) {
+			raw_spin_unlock_irq(&logbuf_lock);
+			break;
+		}
+		msg = log_from_idx(syslog_idx);
+		n = msg_print_text(msg, true, text, LOG_LINE_MAX);
+		if (n <= size) {
+			syslog_idx = log_next(syslog_idx);
+			syslog_seq++;
+		} else
+			n = 0;
+		raw_spin_unlock_irq(&logbuf_lock);
+
+		if (!n)
+			break;
+
+		len += n;
+		size -= n;
+		buf += n;
+		n = copy_to_user(buf - n, text, n);
+
+		if (n) {
+			len -= n;
+			if (!len)
+				len = -EFAULT;
+			break;
+		}
+	}

 	kfree(text);
 	return len;
@@ -1040,6 +1078,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 	/* Clear ring buffer */
 	case SYSLOG_ACTION_CLEAR:
 		syslog_print_all(NULL, 0, true);
+		break;
 	/* Disable logging to console */
 	case SYSLOG_ACTION_CONSOLE_OFF:
 		if (saved_console_loglevel == -1)
@@ -1272,15 +1311,92 @@ static inline void printk_delay(void)
 	}
 }

+/*
+ * Continuation lines are buffered, and not committed to the record buffer
+ * until the line is complete, or a race forces it. The line fragments
+ * though, are printed immediately to the consoles to ensure everything has
+ * reached the console in case of a kernel crash.
+ */
+static struct cont {
+	char buf[LOG_LINE_MAX];
+	size_t len;			/* length == 0 means unused buffer */
+	size_t cons;			/* bytes written to console */
+	struct task_struct *owner;	/* task of first print*/
+	u64 ts_nsec;			/* time of first print */
+	u8 level;			/* log level of first message */
+	u8 facility;			/* log level of first message */
+	bool flushed:1;			/* buffer sealed and committed */
+} cont;
+
+static void cont_flush(void)
+{
+	if (cont.flushed)
+		return;
+	if (cont.len == 0)
+		return;
+
+	log_store(cont.facility, cont.level, LOG_NOCONS, cont.ts_nsec,
+		  NULL, 0, cont.buf, cont.len);
+
+	cont.flushed = true;
+}
+
+static bool cont_add(int facility, int level, const char *text, size_t len)
+{
+	if (cont.len && cont.flushed)
+		return false;
+
+	if (cont.len + len > sizeof(cont.buf)) {
+		cont_flush();
+		return false;
+	}
+
+	if (!cont.len) {
+		cont.facility = facility;
+		cont.level = level;
+		cont.owner = current;
+		cont.ts_nsec = local_clock();
+		cont.cons = 0;
+		cont.flushed = false;
+	}
+
+	memcpy(cont.buf + cont.len, text, len);
+	cont.len += len;
+	return true;
+}
+
+static size_t cont_print_text(char *text, size_t size)
+{
+	size_t textlen = 0;
+	size_t len;
+
+	if (cont.cons == 0) {
+		textlen += print_time(cont.ts_nsec, text);
+		size -= textlen;
+	}
+
+	len = cont.len - cont.cons;
+	if (len > 0) {
+		if (len+1 > size)
+			len = size-1;
+		memcpy(text + textlen, cont.buf + cont.cons, len);
+		textlen += len;
+		cont.cons = cont.len;
+	}
+
+	if (cont.flushed) {
+		text[textlen++] = '\n';
+		/* got everything, release buffer */
+		cont.len = 0;
+	}
+	return textlen;
+}
+
 asmlinkage int vprintk_emit(int facility, int level,
 			    const char *dict, size_t dictlen,
 			    const char *fmt, va_list args)
 {
 	static int recursion_bug;
-	static char cont_buf[LOG_LINE_MAX];
-	static size_t cont_len;
-	static int cont_level;
-	static struct task_struct *cont_task;
 	static char textbuf[LOG_LINE_MAX];
 	char *text = textbuf;
 	size_t text_len;
@@ -1326,7 +1442,8 @@ asmlinkage int vprintk_emit(int facility, int level,
 		recursion_bug = 0;
 		recursion_bug = 0;
 		printed_len += strlen(recursion_msg);
 		printed_len += strlen(recursion_msg);
 		/* emit KERN_CRIT message */
 		/* emit KERN_CRIT message */
-		log_store(0, 2, NULL, 0, recursion_msg, printed_len);
+		log_store(0, 2, LOG_DEFAULT, 0,
+			  NULL, 0, recursion_msg, printed_len);
 	}
 
 	/*
@@ -1364,55 +1481,37 @@ asmlinkage int vprintk_emit(int facility, int level,
 	}
 
 	if (!newline) {
-		if (cont_len && (prefix || cont_task != current)) {
-			/*
-			 * Flush earlier buffer, which is either from a
-			 * different thread, or when we got a new prefix.
-			 */
-			log_store(facility, cont_level, NULL, 0, cont_buf, cont_len);
-			cont_len = 0;
-		}
-
-		if (!cont_len) {
-			cont_level = level;
-			cont_task = current;
-		}
+		/*
+		 * Flush the conflicting buffer. An earlier newline was missing,
+		 * or another task also prints continuation lines.
+		 */
+		if (cont.len && (prefix || cont.owner != current))
+			cont_flush();
 
-		/* buffer or append to earlier buffer from the same thread */
-		if (cont_len + text_len > sizeof(cont_buf))
-			text_len = sizeof(cont_buf) - cont_len;
-		memcpy(cont_buf + cont_len, text, text_len);
-		cont_len += text_len;
+		/* buffer line if possible, otherwise store it right away */
+		if (!cont_add(facility, level, text, text_len))
+			log_store(facility, level, LOG_DEFAULT, 0,
+				  dict, dictlen, text, text_len);
 	} else {
-		if (cont_len && cont_task == current) {
-			if (prefix) {
-				/*
-				 * New prefix from the same thread; flush. We
-				 * either got no earlier newline, or we race
-				 * with an interrupt.
-				 */
-				log_store(facility, cont_level,
-					  NULL, 0, cont_buf, cont_len);
-				cont_len = 0;
-			}
+		bool stored = false;
 
-			/* append to the earlier buffer and flush */
-			if (cont_len + text_len > sizeof(cont_buf))
-				text_len = sizeof(cont_buf) - cont_len;
-			memcpy(cont_buf + cont_len, text, text_len);
-			cont_len += text_len;
-			log_store(facility, cont_level,
-				  NULL, 0, cont_buf, cont_len);
-			cont_len = 0;
-			cont_task = NULL;
-			printed_len = cont_len;
-		} else {
-			/* ordinary single and terminated line */
-			log_store(facility, level,
-				  dict, dictlen, text, text_len);
-			printed_len = text_len;
+		/*
+		 * If an earlier newline was missing and it was the same task,
+		 * either merge it with the current buffer and flush, or if
+		 * there was a race with interrupts (prefix == true) then just
+		 * flush it out and store this line separately.
+		 */
+		if (cont.len && cont.owner == current) {
+			if (!prefix)
+				stored = cont_add(facility, level, text, text_len);
+			cont_flush();
 		}
+
+		if (!stored)
+			log_store(facility, level, LOG_DEFAULT, 0,
+				  dict, dictlen, text, text_len);
 	}
+	printed_len += text_len;
 
 	/*
 	 * Try to acquire and then immediately release the console semaphore.
@@ -1499,11 +1598,18 @@ EXPORT_SYMBOL(printk);
 #else
 
 #define LOG_LINE_MAX 0
+static struct cont {
+	size_t len;
+	size_t cons;
+	u8 level;
+	bool flushed:1;
+} cont;
 static struct log *log_from_idx(u32 idx) { return NULL; }
 static u32 log_next(u32 idx) { return 0; }
 static void call_console_drivers(int level, const char *text, size_t len) {}
 static size_t msg_print_text(const struct log *msg, bool syslog,
 			     char *buf, size_t size) { return 0; }
+static size_t cont_print_text(char *text, size_t size) { return 0; }
 
 #endif /* CONFIG_PRINTK */
 
@@ -1795,6 +1901,7 @@ static u32 console_idx;
  */
 void console_unlock(void)
 {
+	static char text[LOG_LINE_MAX];
 	static u64 seen_seq;
 	unsigned long flags;
 	bool wake_klogd = false;
@@ -1807,10 +1914,23 @@ void console_unlock(void)
 
 	console_may_schedule = 0;
 
+	/* flush buffered message fragment immediately to console */
+	raw_spin_lock_irqsave(&logbuf_lock, flags);
+	if (cont.len && (cont.cons < cont.len || cont.flushed)) {
+		size_t len;
+
+		len = cont_print_text(text, sizeof(text));
+		raw_spin_unlock(&logbuf_lock);
+		stop_critical_timings();
+		call_console_drivers(cont.level, text, len);
+		start_critical_timings();
+		local_irq_restore(flags);
+	} else
+		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
 again:
 	for (;;) {
 		struct log *msg;
-		static char text[LOG_LINE_MAX];
 		size_t len;
 		int level;
 
@@ -1825,13 +1945,22 @@ again:
 			console_seq = log_first_seq;
 			console_idx = log_first_idx;
 		}
-
+skip:
 		if (console_seq == log_next_seq)
 			break;
 
 		msg = log_from_idx(console_idx);
-		level = msg->level & 7;
+		if (msg->flags & LOG_NOCONS) {
+			/*
+			 * Skip record we have buffered and already printed
+			 * directly to the console when we received it.
+			 */
+			console_idx = log_next(console_idx);
+			console_seq++;
+			goto skip;
+		}
 
+		level = msg->level;
 		len = msg_print_text(msg, false, text, sizeof(text));
 
 		console_idx = log_next(console_idx);
@@ -2409,7 +2538,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
  * kmsg_dump_get_buffer - copy kmsg log lines
  * @dumper: registered kmsg dumper
  * @syslog: include the "<4>" prefixes
- * @line: buffer to copy the line to
+ * @buf: buffer to copy the line to
  * @size: maximum size of the buffer
  * @len: length of line placed into buffer
  *

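The printk.c changes above replace the old ad-hoc cont_buf/cont_len globals with a dedicated struct cont that tracks the owning task, the timestamp, and how much of the line has already been written to the console. Fragments printed without a trailing newline are merged into one record, yet each fragment still goes out to the console immediately; records committed by cont_flush() are tagged LOG_NOCONS so console_unlock() skips them rather than printing them twice. A minimal sketch of the call pattern this serves (the device probe and its names are illustrative, not from the commit):

	/* two fragments, one logged record; each fragment still reaches
	 * the console right away in case the probe wedges the machine
	 */
	printk(KERN_INFO "myprobe: checking device at 0x%04x ...", addr);
	err = myprobe_check(addr);		/* hypothetical helper */
	printk(KERN_CONT " %s\n", err ? "failed" : "ok");

If another task prints in between, or a new prefixed message arrives from the same task, cont_flush() seals the partial line into its own record, so interleaved output degrades into separate records instead of corrupting the buffer.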
+ 7 - 7
kernel/rcutree.c

@@ -1530,7 +1530,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
-	int bl, count, count_lazy;
+	int bl, count, count_lazy, i;
 
 	/* If no callbacks are ready, just return.*/
 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
@@ -1553,9 +1553,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
 	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
 	tail = rdp->nxttail[RCU_DONE_TAIL];
-	for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
-		if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
-			rdp->nxttail[count] = &rdp->nxtlist;
+	for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
+		if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
+			rdp->nxttail[i] = &rdp->nxtlist;
 	local_irq_restore(flags);
 
 	/* Invoke callbacks. */
@@ -1583,9 +1583,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	if (list != NULL) {
 		*tail = rdp->nxtlist;
 		rdp->nxtlist = list;
-		for (count = 0; count < RCU_NEXT_SIZE; count++)
-			if (&rdp->nxtlist == rdp->nxttail[count])
-				rdp->nxttail[count] = tail;
+		for (i = 0; i < RCU_NEXT_SIZE; i++)
+			if (&rdp->nxtlist == rdp->nxttail[i])
+				rdp->nxttail[i] = tail;
 			else
 				break;
 	}

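The rcutree.c rename is more than cosmetic: count is also the running tally of callbacks just invoked, and that tally feeds the queue-length bookkeeping after these loops, so recycling it as the tail-fixup loop index could clobber it. A hedged standalone sketch of the pattern being fixed (helper names are illustrative):

	int count, i;

	count = invoke_ready_callbacks();	/* hypothetical tally of work done */

	/* buggy shape: recycling 'count' as the loop index wipes the tally */
	for (count = 0; count < RCU_NEXT_SIZE; count++)
		fix_up_tail_pointer(count);

	/* fixed shape: a dedicated index leaves the tally intact */
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		fix_up_tail_pointer(i);

	adjust_queue_length(count);		/* now sees the real count */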
+ 3 - 2
kernel/relay.c

@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.nr_pages = 0,
+		.nr_pages_max = PIPE_DEF_BUFFERS,
 		.partial = partial,
 		.flags = flags,
 		.ops = &relay_pipe_buf_ops,
@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
                 ret += padding;
 
 out:
-	splice_shrink_spd(pipe, &spd);
-        return ret;
+	splice_shrink_spd(&spd);
+	return ret;
 }
 
 static ssize_t relay_file_splice_read(struct file *in,

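This relay.c hunk and the trace.c, shmem.c, and skbuff.c hunks that follow all track the same splice API change: splice_pipe_desc now records the capacity of its page arrays in nr_pages_max, and splice_shrink_spd() no longer takes the pipe, so a concurrent pipe resize can no longer confuse the release path. A hedged sketch of the updated idiom for a splice-read actor (the ops table and release hook names are illustrative):

	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.nr_pages	= 0,			/* filled in below */
		.nr_pages_max	= PIPE_DEF_BUFFERS,	/* capacity of the arrays */
		.flags		= flags,
		.ops		= &my_pipe_buf_ops,	/* hypothetical */
		.spd_release	= my_spd_release,	/* hypothetical */
	};
	ssize_t ret;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;
	/* ... fill spd.pages/spd.partial and set spd.nr_pages ... */
	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);	/* pipe argument is gone */
	return ret;

Previously splice_shrink_spd() consulted pipe->buffers to decide how many pages to release, which was racy against pipe resizing between grow and shrink.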
+ 4 - 2
kernel/trace/trace.c

@@ -3609,6 +3609,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		.pages		= pages_def,
 		.partial	= partial_def,
 		.nr_pages	= 0, /* This gets updated below. */
+		.nr_pages_max	= PIPE_DEF_BUFFERS,
 		.flags		= flags,
 		.ops		= &tracing_pipe_buf_ops,
 		.spd_release	= tracing_spd_release_pipe,
@@ -3680,7 +3681,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
 	ret = splice_to_pipe(pipe, &spd);
 out:
-	splice_shrink_spd(pipe, &spd);
+	splice_shrink_spd(&spd);
 	return ret;
 
 out_err:
@@ -4231,6 +4232,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	struct splice_pipe_desc spd = {
 		.pages		= pages_def,
 		.partial	= partial_def,
+		.nr_pages_max	= PIPE_DEF_BUFFERS,
 		.flags		= flags,
 		.ops		= &buffer_pipe_buf_ops,
 		.spd_release	= buffer_spd_release,
@@ -4318,7 +4320,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	}
 
 	ret = splice_to_pipe(pipe, &spd);
-	splice_shrink_spd(pipe, &spd);
+	splice_shrink_spd(&spd);
 out:
 	return ret;
 }

+ 2 - 1
mm/shmem.c

@@ -1594,6 +1594,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
+		.nr_pages_max = PIPE_DEF_BUFFERS,
 		.flags = flags,
 		.ops = &page_cache_pipe_buf_ops,
 		.spd_release = spd_release_page,
@@ -1682,7 +1683,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 	if (spd.nr_pages)
 		error = splice_to_pipe(pipe, &spd);
 
-	splice_shrink_spd(pipe, &spd);
+	splice_shrink_spd(&spd);
 
 	if (error > 0) {
 		*ppos += error;

+ 1 - 0
net/core/skbuff.c

@@ -1755,6 +1755,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
+		.nr_pages_max = MAX_SKB_FRAGS,
 		.flags = flags,
 		.ops = &sock_pipe_buf_ops,
 		.spd_release = sock_spd_release,

+ 12 - 0
net/netfilter/ipset/ip_set_core.c

@@ -639,6 +639,14 @@ find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
 	return 0;
 }
 
+static int
+ip_set_none(struct sock *ctnl, struct sk_buff *skb,
+	    const struct nlmsghdr *nlh,
+	    const struct nlattr * const attr[])
+{
+	return -EOPNOTSUPP;
+}
+
 static int
 ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 	      const struct nlmsghdr *nlh,
@@ -1539,6 +1547,10 @@ nlmsg_failure:
 }
 
 static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+	[IPSET_CMD_NONE]	= {
+		.call		= ip_set_none,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+	},
 	[IPSET_CMD_CREATE]	= {
 		.call		= ip_set_create,
 		.attr_count	= IPSET_ATTR_CMD_MAX,

+ 4 - 28
net/netfilter/ipset/ip_set_hash_netiface.c

@@ -38,30 +38,6 @@ struct iface_node {
 
 #define iface_data(n)	(rb_entry(n, struct iface_node, node)->iface)
 
-static inline long
-ifname_compare(const char *_a, const char *_b)
-{
-	const long *a = (const long *)_a;
-	const long *b = (const long *)_b;
-
-	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
-	if (a[0] != b[0])
-		return a[0] - b[0];
-	if (IFNAMSIZ > sizeof(long)) {
-		if (a[1] != b[1])
-			return a[1] - b[1];
-	}
-	if (IFNAMSIZ > 2 * sizeof(long)) {
-		if (a[2] != b[2])
-			return a[2] - b[2];
-	}
-	if (IFNAMSIZ > 3 * sizeof(long)) {
-		if (a[3] != b[3])
-			return a[3] - b[3];
-	}
-	return 0;
-}
-
 static void
 rbtree_destroy(struct rb_root *root)
 {
@@ -99,7 +75,7 @@ iface_test(struct rb_root *root, const char **iface)
 
 	while (n) {
 		const char *d = iface_data(n);
-		long res = ifname_compare(*iface, d);
+		int res = strcmp(*iface, d);
 
 		if (res < 0)
 			n = n->rb_left;
@@ -121,7 +97,7 @@ iface_add(struct rb_root *root, const char **iface)
 
 	while (*n) {
 		char *ifname = iface_data(*n);
-		long res = ifname_compare(*iface, ifname);
+		int res = strcmp(*iface, ifname);
 
 		p = *n;
 		if (res < 0)
@@ -366,7 +342,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 	struct hash_netiface4_elem data = { .cidr = HOST_MASK };
 	u32 ip = 0, ip_to, last;
 	u32 timeout = h->timeout;
-	char iface[IFNAMSIZ] = {};
+	char iface[IFNAMSIZ];
 	int ret;
 
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -663,7 +639,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface6_elem data = { .cidr = HOST_MASK };
 	u32 timeout = h->timeout;
-	char iface[IFNAMSIZ] = {};
+	char iface[IFNAMSIZ];
 	int ret;
 
 	if (unlikely(!tb[IPSET_ATTR_IP] ||

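The removed ifname_compare() compared interface names as whole unsigned longs, which is only correct when both buffers are zero-padded all the way to IFNAMSIZ; with plain NUL-terminated strings, stale bytes past the terminator make equal names compare unequal. strcmp() stops at the NUL, which is also why the on-stack iface[] arrays no longer need the = {} zero-fill. A small user-space demonstration of the difference (assumes the usual 16-byte IFNAMSIZ):

	#include <stdio.h>
	#include <string.h>

	#define IFNAMSIZ 16

	int main(void)
	{
		char a[IFNAMSIZ] = "eth0";	/* initializer zero-pads the tail */
		char b[IFNAMSIZ];

		memset(b, 0xAA, sizeof(b));	/* simulate stale stack bytes */
		strcpy(b, "eth0");		/* NUL-terminated, tail left dirty */

		/* whole-buffer compare sees the dirty tail: prints "differ" */
		printf("%s\n", memcmp(a, b, IFNAMSIZ) ? "differ" : "equal");
		/* strcmp stops at the NUL: prints "equal" */
		printf("%s\n", strcmp(a, b) ? "differ" : "equal");
		return 0;
	}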
+ 7 - 7
net/netfilter/ipvs/ip_vs_ctl.c

@@ -76,19 +76,19 @@ static void __ip_vs_del_service(struct ip_vs_service *svc);
 
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(struct net *net,
-				    const struct in6_addr *addr)
+static bool __ip_vs_addr_is_local_v6(struct net *net,
+				     const struct in6_addr *addr)
 {
-	struct rt6_info *rt;
 	struct flowi6 fl6 = {
 		.daddr = *addr,
 	};
+	struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
+	bool is_local;
 
-	rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
-	if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
-		return 1;
+	is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
 
-	return 0;
+	dst_release(dst);
+	return is_local;
 }
 #endif
 

+ 3 - 1
net/netfilter/nfnetlink.c

@@ -178,8 +178,10 @@ replay:
 
 		err = nla_parse(cda, ss->cb[cb_id].attr_count,
 				attr, attrlen, ss->cb[cb_id].policy);
-		if (err < 0)
+		if (err < 0) {
+			rcu_read_unlock();
 			return err;
+		}
 
 		if (nc->call_rcu) {
 			err = nc->call_rcu(net->nfnl, skb, nlh,

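The nfnetlink.c fix is the classic missing-unlock-on-error-path bug: the message handler returned from inside an rcu_read_lock() section when attribute parsing failed. A minimal hedged sketch of the balanced shape (the parse helper is illustrative):

	rcu_read_lock();
	err = parse_attributes(skb);	/* hypothetical parse step */
	if (err < 0) {
		rcu_read_unlock();	/* every exit must drop the lock */
		return err;
	}
	/* ... use the RCU-protected callback table ... */
	rcu_read_unlock();
	return 0;

Leaving a read-side critical section open after returning blocks RCU grace periods and trips the RCU lockdep debugging checks.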
+ 1 - 0
net/sctp/associola.c

@@ -271,6 +271,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	 */
 	asoc->peer.sack_needed = 1;
 	asoc->peer.sack_cnt = 0;
+	asoc->peer.sack_generation = 1;
 
 	/* Assume that the peer will tell us if he recognizes ASCONF
 	 * as part of INIT exchange.

+ 5 - 0
net/sctp/output.c

@@ -248,6 +248,11 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
 		/* If the SACK timer is running, we have a pending SACK */
 		if (timer_pending(timer)) {
 			struct sctp_chunk *sack;
+
+			if (pkt->transport->sack_generation !=
+			    pkt->transport->asoc->peer.sack_generation)
+				return retval;
+
 			asoc->a_rwnd = asoc->rwnd;
 			sack = sctp_make_sack(asoc);
 			if (sack) {

+ 16 - 0
net/sctp/sm_make_chunk.c

@@ -734,8 +734,10 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
 	int len;
 	__u32 ctsn;
 	__u16 num_gabs, num_dup_tsns;
+	struct sctp_association *aptr = (struct sctp_association *)asoc;
 	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
 	struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
+	struct sctp_transport *trans;
 
 	memset(gabs, 0, sizeof(gabs));
 	ctsn = sctp_tsnmap_get_ctsn(map);
@@ -805,6 +807,20 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
 		sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
 				 sctp_tsnmap_get_dups(map));
 
+	/* Once we have a sack generated, check to see what our sack
+	 * generation is; if it's 0, reset the transports to 0 and reset
+	 * the association generation to 1.
+	 *
+	 * The idea is that zero is never used as a valid generation for the
+	 * association, so no transport will match after a wrap event like
+	 * this until the next sack.
+	 */
+	if (++aptr->peer.sack_generation == 0) {
+		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+				    transports)
+			trans->sack_generation = 0;
+		aptr->peer.sack_generation = 1;
+	}
 nodata:
 	return retval;
 }

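The three sctp hunks work together: associola.c starts the association's peer.sack_generation at 1, sctp_make_sack() bumps it per SACK, and sctp_packet_bundle_sack() declines to bundle a SACK on a transport whose generation does not match the association's. Zero is reserved as the never-matches value, so a counter wrap cannot produce a false match. A hedged user-space sketch of just the wrap rule (types and structures simplified):

	#include <stdint.h>

	struct assoc {
		uint32_t sack_generation;	/* 0 is reserved, never valid */
	};

	static void bump_sack_generation(struct assoc *asoc,
					 uint32_t *transport_gen, int ntrans)
	{
		if (++asoc->sack_generation == 0) {
			/* wrap: park transports at the reserved value 0 and
			 * restart the association at 1, so nothing matches
			 * until the next SACK refreshes the transports
			 */
			for (int i = 0; i < ntrans; i++)
				transport_gen[i] = 0;
			asoc->sack_generation = 1;
		}
	}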
Some files were not shown because too many files changed in this diff