
Merge tag 'v3.7-rc7' into next/cleanup

Merging in mainline back to next/cleanup since it has collected a few
conflicts between fixes going upstream and some of the cleanup patches.
Git doesn't auto-resolve some of them, and they're mostly noise so let's
take care of it locally.

Conflicts are in:
	arch/arm/mach-omap2/omap_hwmod_44xx_data.c
	arch/arm/plat-omap/i2c.c
	drivers/video/omap2/dss/dss.c

Signed-off-by: Olof Johansson <olof@lixom.net>
Olof Johansson, 12 years ago · commit 0f9cb211ba
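
The exact commands used are not recorded in the commit, but a back-merge of this kind is typically produced along the following lines. This is only an illustrative sketch; the tag, branch, and file names are taken from the commit message above, everything else is assumed:

	git checkout next/cleanup
	git merge v3.7-rc7                  # pull mainline back into the cleanup branch
	# resolve the three conflicting files by hand, then stage them
	git add arch/arm/mach-omap2/omap_hwmod_44xx_data.c \
	        arch/arm/plat-omap/i2c.c \
	        drivers/video/omap2/dss/dss.c
	git commit --signoff                # conclude the merge with the message above
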
100 changed files with 1199 additions and 649 deletions
  1. CREDITS (+5 -0)
  2. Documentation/cgroups/memory.txt (+4 -0)
  3. Documentation/devicetree/bindings/net/mdio-gpio.txt (+8 -1)
  4. Documentation/filesystems/proc.txt (+12 -4)
  5. Documentation/networking/netdev-features.txt (+1 -1)
  6. Documentation/networking/vxlan.txt (+2 -2)
  7. MAINTAINERS (+77 -19)
  8. Makefile (+1 -1)
  9. arch/alpha/kernel/osf_sys.c (+3 -3)
  10. arch/arm/boot/Makefile (+5 -5)
  11. arch/arm/boot/dts/tegra30.dtsi (+2 -2)
  12. arch/arm/mach-at91/at91rm9200_devices.c (+1 -1)
  13. arch/arm/mach-at91/at91sam9260_devices.c (+1 -1)
  14. arch/arm/mach-at91/at91sam9261_devices.c (+1 -1)
  15. arch/arm/mach-at91/at91sam9263_devices.c (+1 -1)
  16. arch/arm/mach-at91/at91sam9g45_devices.c (+6 -6)
  17. arch/arm/mach-davinci/dm644x.c (+1 -2)
  18. arch/arm/mach-exynos/dma.c (+3 -0)
  19. arch/arm/mach-exynos/include/mach/map.h (+1 -0)
  20. arch/arm/mach-highbank/system.c (+2 -1)
  21. arch/arm/mach-imx/clk-gate2.c (+1 -1)
  22. arch/arm/mach-imx/ehci-imx25.c (+1 -1)
  23. arch/arm/mach-imx/ehci-imx35.c (+1 -1)
  24. arch/arm/mach-omap2/board-igep0020.c (+5 -0)
  25. arch/arm/mach-omap2/clockdomains44xx_data.c (+1 -1)
  26. arch/arm/mach-omap2/common-board-devices.c (+20 -14)
  27. arch/arm/mach-omap2/devices.c (+79 -0)
  28. arch/arm/mach-omap2/omap_hwmod.c (+49 -14)
  29. arch/arm/mach-omap2/omap_hwmod.h (+6 -0)
  30. arch/arm/mach-omap2/omap_hwmod_44xx_data.c (+36 -0)
  31. arch/arm/mach-omap2/twl-common.c (+2 -1)
  32. arch/arm/mach-omap2/vc.c (+1 -1)
  33. arch/arm/mach-pxa/hx4700.c (+7 -1)
  34. arch/arm/mach-pxa/spitz_pm.c (+2 -6)
  35. arch/arm/tools/Makefile (+1 -1)
  36. arch/arm64/include/asm/io.h (+1 -1)
  37. arch/arm64/include/asm/pgtable-hwdef.h (+4 -2)
  38. arch/arm64/include/asm/pgtable.h (+20 -20)
  39. arch/ia64/mm/init.c (+0 -1)
  40. arch/m68k/include/asm/signal.h (+3 -3)
  41. arch/mips/cavium-octeon/executive/cvmx-l2c.c (+1 -0)
  42. arch/mips/fw/arc/misc.c (+1 -0)
  43. arch/mips/include/asm/bitops.h (+39 -89)
  44. arch/mips/include/asm/compat.h (+1 -1)
  45. arch/mips/include/asm/io.h (+1 -0)
  46. arch/mips/include/asm/irqflags.h (+75 -132)
  47. arch/mips/include/asm/thread_info.h (+0 -6)
  48. arch/mips/kernel/setup.c (+20 -6)
  49. arch/mips/lib/Makefile (+3 -2)
  50. arch/mips/lib/bitops.c (+179 -0)
  51. arch/mips/lib/mips-atomic.c (+176 -0)
  52. arch/mips/mti-malta/malta-platform.c (+2 -1)
  53. arch/parisc/kernel/signal32.c (+4 -2)
  54. arch/parisc/kernel/sys_parisc.c (+2 -0)
  55. arch/powerpc/boot/dts/mpc5200b.dtsi (+6 -0)
  56. arch/powerpc/boot/dts/o2d.dtsi (+0 -6)
  57. arch/powerpc/boot/dts/pcm030.dts (+6 -1)
  58. arch/powerpc/platforms/52xx/mpc52xx_pic.c (+5 -4)
  59. arch/powerpc/platforms/pseries/eeh_pe.c (+1 -1)
  60. arch/powerpc/platforms/pseries/msi.c (+2 -1)
  61. arch/s390/Kconfig (+1 -0)
  62. arch/s390/include/asm/compat.h (+1 -1)
  63. arch/s390/include/asm/topology.h (+3 -0)
  64. arch/s390/include/uapi/asm/ptrace.h (+2 -2)
  65. arch/s390/kernel/compat_signal.c (+12 -2)
  66. arch/s390/kernel/signal.c (+12 -2)
  67. arch/s390/kernel/topology.c (+4 -2)
  68. arch/s390/mm/gup.c (+2 -3)
  69. arch/sparc/include/asm/prom.h (+4 -1)
  70. arch/sparc/kernel/signal_64.c (+1 -3)
  71. arch/unicore32/Kconfig (+6 -1)
  72. arch/unicore32/include/asm/Kbuild (+0 -1)
  73. arch/unicore32/include/asm/bug.h (+0 -5)
  74. arch/unicore32/include/asm/cmpxchg.h (+1 -1)
  75. arch/unicore32/include/asm/kvm_para.h (+0 -1)
  76. arch/unicore32/include/asm/processor.h (+0 -5)
  77. arch/unicore32/include/asm/ptrace.h (+1 -75)
  78. arch/unicore32/include/uapi/asm/Kbuild (+7 -0)
  79. arch/unicore32/include/uapi/asm/byteorder.h (+0 -0)
  80. arch/unicore32/include/uapi/asm/ptrace.h (+90 -0)
  81. arch/unicore32/include/uapi/asm/sigcontext.h (+0 -0)
  82. arch/unicore32/include/uapi/asm/unistd.h (+1 -0)
  83. arch/unicore32/kernel/entry.S (+7 -13)
  84. arch/unicore32/kernel/process.c (+14 -44)
  85. arch/unicore32/kernel/setup.h (+6 -0)
  86. arch/unicore32/kernel/sys.c (+0 -63)
  87. arch/unicore32/mm/fault.c (+27 -10)
  88. arch/x86/boot/compressed/eboot.c (+2 -0)
  89. arch/x86/boot/header.S (+0 -3)
  90. arch/x86/include/asm/ptrace.h (+4 -11)
  91. arch/x86/kernel/cpu/amd.c (+14 -0)
  92. arch/x86/kernel/cpu/mcheck/mce_amd.c (+1 -1)
  93. arch/x86/kernel/cpu/mcheck/mce_intel.c (+18 -13)
  94. arch/x86/kernel/entry_64.S (+7 -7)
  95. arch/x86/kernel/microcode_amd.c (+6 -2)
  96. arch/x86/kernel/ptrace.c (+30 -0)
  97. arch/x86/kvm/cpuid.h (+3 -0)
  98. arch/x86/kvm/vmx.c (+7 -4)
  99. arch/x86/kvm/x86.c (+3 -0)
  100. arch/x86/mm/tlb.c (+1 -1)

+ 5 - 0
CREDITS

@@ -1823,6 +1823,11 @@ S: Kattreinstr 38
 S: D-64295
 S: Germany
 
+N: Avi Kivity
+E: avi.kivity@gmail.com
+D: Kernel-based Virtual Machine (KVM)
+S: Ra'annana, Israel
+
 N: Andi Kleen
 E: andi@firstfloor.org
 U: http://www.halobates.de

+ 4 - 0
Documentation/cgroups/memory.txt

@@ -466,6 +466,10 @@ Note:
 5.3 swappiness
 
 Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
+Please note that unlike the global swappiness, memcg knob set to 0
+really prevents from any swapping even if there is a swap storage
+available. This might lead to memcg OOM killer if there are no file
+pages to reclaim.
 
 Following cgroups' swappiness can't be changed.
 - root cgroup (uses /proc/sys/vm/swappiness).
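
As a usage illustration of the memcg swappiness knob documented in the hunk above (not part of the patch; the group name "example" and the cgroup v1 mount point /sys/fs/cgroup/memory are assumptions):

	mkdir /sys/fs/cgroup/memory/example                        # hypothetical group
	echo 0 > /sys/fs/cgroup/memory/example/memory.swappiness   # no swapping at all for this group
	echo $$ > /sys/fs/cgroup/memory/example/tasks               # move the current shell into the group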

+ 8 - 1
Documentation/devicetree/bindings/net/mdio-gpio.txt

@@ -8,9 +8,16 @@ gpios property as described in section VIII.1 in the following order:
 
 MDC, MDIO.
 
+Note: Each gpio-mdio bus should have an alias correctly numbered in "aliases"
+node.
+
 Example:
 
-mdio {
+aliases {
+	mdio-gpio0 = <&mdio0>;
+};
+
+mdio0: mdio {
 	compatible = "virtual,mdio-gpio";
 	#address-cells = <1>;
 	#size-cells = <0>;

+ 12 - 4
Documentation/filesystems/proc.txt

@@ -33,7 +33,7 @@ Table of Contents
   2	Modifying System Parameters
 
   3	Per-Process Parameters
-  3.1	/proc/<pid>/oom_score_adj - Adjust the oom-killer
+  3.1	/proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj - Adjust the oom-killer
 								score
   3.2	/proc/<pid>/oom_score - Display current oom-killer score
   3.3	/proc/<pid>/io - Display the IO accounting fields
@@ -1320,10 +1320,10 @@ of the kernel.
 CHAPTER 3: PER-PROCESS PARAMETERS
 ------------------------------------------------------------------------------
 
-3.1 /proc/<pid>/oom_score_adj- Adjust the oom-killer score
+3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj- Adjust the oom-killer score
 --------------------------------------------------------------------------------
 
-This file can be used to adjust the badness heuristic used to select which
+These file can be used to adjust the badness heuristic used to select which
 process gets killed in out of memory conditions.
 
 The badness heuristic assigns a value to each candidate task ranging from 0
@@ -1361,6 +1361,12 @@ same system, cpuset, mempolicy, or memory controller resources to use at least
 equivalent to discounting 50% of the task's allowed memory from being considered
 as scoring against the task.
 
+For backwards compatibility with previous kernels, /proc/<pid>/oom_adj may also
+be used to tune the badness score.  Its acceptable values range from -16
+(OOM_ADJUST_MIN) to +15 (OOM_ADJUST_MAX) and a special value of -17
+(OOM_DISABLE) to disable oom killing entirely for that task.  Its value is
+scaled linearly with /proc/<pid>/oom_score_adj.
+
 The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last
 value set by a CAP_SYS_RESOURCE process. To reduce the value any lower
 requires CAP_SYS_RESOURCE.
@@ -1375,7 +1381,9 @@ minimal amount of work.
 -------------------------------------------------------------
 
 This file can be used to check the current score used by the oom-killer is for
-any given <pid>.
+any given <pid>. Use it together with /proc/<pid>/oom_score_adj to tune which
+process should be killed in an out-of-memory situation.
+
 
 3.3  /proc/<pid>/io - Display the IO accounting fields
 -------------------------------------------------------

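As a usage illustration of the oom_adj and oom_score_adj interfaces documented in the proc.txt hunks above (not part of the patch; pid 1234 is hypothetical):

	cat /proc/1234/oom_score            # current badness score used by the oom-killer
	echo 500 > /proc/1234/oom_score_adj # make this task a more likely OOM victim
	echo -17 > /proc/1234/oom_adj       # legacy knob: -17 (OOM_DISABLE) exempts the task entirely
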
+ 1 - 1
Documentation/networking/netdev-features.txt

@@ -164,4 +164,4 @@ read the CRC recorded by the NIC on receipt of the packet.
 This requests that the NIC receive all possible frames, including errored
 frames (such as bad FCS, etc).  This can be helpful when sniffing a link with
 bad packets on it.  Some NICs may receive more packets if also put into normal
-PROMISC mdoe.
+PROMISC mode.

+ 2 - 2
Documentation/networking/vxlan.txt

@@ -32,7 +32,7 @@ no entry is in the forwarding table.
   # ip link delete vxlan0
 
 3. Show vxlan info
-  # ip -d show vxlan0
+  # ip -d link show vxlan0
 
 It is possible to create, destroy and display the vxlan
 forwarding table using the new bridge command.
@@ -41,7 +41,7 @@ forwarding table using the new bridge command.
   # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
 
 2. Delete forwarding table entry
-  # bridge fdb delete 00:17:42:8a:b4:05
+  # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0
 
 3. Show forwarding table
   # bridge fdb show dev vxlan0

+ 77 - 19
MAINTAINERS

@@ -526,17 +526,17 @@ F:	drivers/video/geode/
 F:	arch/x86/include/asm/geode.h
 
 AMD IOMMU (AMD-VI)
-M:	Joerg Roedel <joerg.roedel@amd.com>
+M:	Joerg Roedel <joro@8bytes.org>
 L:	iommu@lists.linux-foundation.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
-S:	Supported
+S:	Maintained
 F:	drivers/iommu/amd_iommu*.[ch]
 F:	include/linux/amd-iommu.h
 
 AMD MICROCODE UPDATE SUPPORT
-M:	Andreas Herrmann <andreas.herrmann3@amd.com>
+M:	Andreas Herrmann <herrmann.der.user@googlemail.com>
 L:	amd64-microcode@amd64.org
-S:	Supported
+S:	Maintained
 F:	arch/x86/kernel/microcode_amd.c
 
 AMS (Apple Motion Sensor) DRIVER
@@ -840,6 +840,14 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
 F:	arch/arm/mach-sa1100/jornada720.c
 F:	arch/arm/mach-sa1100/include/mach/jornada720.h
 
+ARM/IGEP MACHINE SUPPORT
+M:	Enric Balletbo i Serra <eballetbo@gmail.com>
+M:	Javier Martinez Canillas <javier@dowhile0.org>
+L:	linux-omap@vger.kernel.org
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-omap2/board-igep0020.c
+
 ARM/INCOME PXA270 SUPPORT
 M:	Marek Vasut <marek.vasut@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2707,10 +2715,10 @@ F:	include/linux/edac.h
 
 EDAC-AMD64
 M:	Doug Thompson <dougthompson@xmission.com>
-M:	Borislav Petkov <borislav.petkov@amd.com>
+M:	Borislav Petkov <bp@alien8.de>
 L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
-S:	Supported
+S:	Maintained
 F:	drivers/edac/amd64_edac*
 
 EDAC-E752X
@@ -3597,6 +3605,49 @@ F:	drivers/hid/hid-hyperv.c
 F:	drivers/net/hyperv/
 F:	drivers/staging/hv/
 
+I2C OVER PARALLEL PORT
+M:	Jean Delvare <khali@linux-fr.org>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	Documentation/i2c/busses/i2c-parport
+F:	Documentation/i2c/busses/i2c-parport-light
+F:	drivers/i2c/busses/i2c-parport.c
+F:	drivers/i2c/busses/i2c-parport-light.c
+
+I2C/SMBUS CONTROLLER DRIVERS FOR PC
+M:	Jean Delvare <khali@linux-fr.org>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	Documentation/i2c/busses/i2c-ali1535
+F:	Documentation/i2c/busses/i2c-ali1563
+F:	Documentation/i2c/busses/i2c-ali15x3
+F:	Documentation/i2c/busses/i2c-amd756
+F:	Documentation/i2c/busses/i2c-amd8111
+F:	Documentation/i2c/busses/i2c-i801
+F:	Documentation/i2c/busses/i2c-nforce2
+F:	Documentation/i2c/busses/i2c-piix4
+F:	Documentation/i2c/busses/i2c-sis5595
+F:	Documentation/i2c/busses/i2c-sis630
+F:	Documentation/i2c/busses/i2c-sis96x
+F:	Documentation/i2c/busses/i2c-via
+F:	Documentation/i2c/busses/i2c-viapro
+F:	drivers/i2c/busses/i2c-ali1535.c
+F:	drivers/i2c/busses/i2c-ali1563.c
+F:	drivers/i2c/busses/i2c-ali15x3.c
+F:	drivers/i2c/busses/i2c-amd756.c
+F:	drivers/i2c/busses/i2c-amd756-s4882.c
+F:	drivers/i2c/busses/i2c-amd8111.c
+F:	drivers/i2c/busses/i2c-i801.c
+F:	drivers/i2c/busses/i2c-isch.c
+F:	drivers/i2c/busses/i2c-nforce2.c
+F:	drivers/i2c/busses/i2c-nforce2-s4985.c
+F:	drivers/i2c/busses/i2c-piix4.c
+F:	drivers/i2c/busses/i2c-sis5595.c
+F:	drivers/i2c/busses/i2c-sis630.c
+F:	drivers/i2c/busses/i2c-sis96x.c
+F:	drivers/i2c/busses/i2c-via.c
+F:	drivers/i2c/busses/i2c-viapro.c
+
 I2C/SMBUS STUB DRIVER
 M:	"Mark M. Hoffman" <mhoffman@lightlink.com>
 L:	linux-i2c@vger.kernel.org
@@ -3604,9 +3655,8 @@ S:	Maintained
 F:	drivers/i2c/busses/i2c-stub.c
 
 I2C SUBSYSTEM
-M:	"Jean Delvare (PC drivers, core)" <khali@linux-fr.org>
+M:	Wolfram Sang <w.sang@pengutronix.de>
 M:	"Ben Dooks (embedded platforms)" <ben-linux@fluff.org>
-M:	"Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de>
 L:	linux-i2c@vger.kernel.org
 W:	http://i2c.wiki.kernel.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
@@ -3617,6 +3667,13 @@ F:	drivers/i2c/
 F:	include/linux/i2c.h
 F:	include/linux/i2c-*.h
 
+I2C-TAOS-EVM DRIVER
+M:	Jean Delvare <khali@linux-fr.org>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	Documentation/i2c/busses/i2c-taos-evm
+F:	drivers/i2c/busses/i2c-taos-evm.c
+
 I2C-TINY-USB DRIVER
 M:	Till Harbaum <till@harbaum.org>
 L:	linux-i2c@vger.kernel.org
@@ -3703,7 +3760,7 @@ S:	Maintained
 F:	drivers/platform/x86/ideapad-laptop.c
 
 IDE/ATAPI DRIVERS
-M:	Borislav Petkov <petkovbb@gmail.com>
+M:	Borislav Petkov <bp@alien8.de>
 L:	linux-ide@vger.kernel.org
 S:	Maintained
 F:	Documentation/cdrom/ide-cd
@@ -4230,8 +4287,8 @@ F:	include/linux/lockd/
 F:	include/linux/sunrpc/
 
 KERNEL VIRTUAL MACHINE (KVM)
-M:	Avi Kivity <avi@redhat.com>
 M:	Marcelo Tosatti <mtosatti@redhat.com>
+M:	Gleb Natapov <gleb@redhat.com>
 L:	kvm@vger.kernel.org
 W:	http://kvm.qumranet.com
 S:	Supported
@@ -5363,7 +5420,7 @@ S:	Maintained
 F:	sound/drivers/opl4/
 
 OPROFILE
-M:	Robert Richter <robert.richter@amd.com>
+M:	Robert Richter <rric@kernel.org>
 L:	oprofile-list@lists.sf.net
 S:	Maintained
 F:	arch/*/include/asm/oprofile*.h
@@ -7209,6 +7266,14 @@ L:	linux-xtensa@linux-xtensa.org
 S:	Maintained
 F:	arch/xtensa/
 
+THERMAL
+M:      Zhang Rui <rui.zhang@intel.com>
+L:      linux-pm@vger.kernel.org
+T:      git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+S:      Supported
+F:      drivers/thermal/
+F:      include/linux/thermal.h
+
 THINKPAD ACPI EXTRAS DRIVER
 M:	Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br>
 L:	ibm-acpi-devel@lists.sourceforge.net
@@ -7886,13 +7951,6 @@ M:	Roger Luethi <rl@hellgate.ch>
 S:	Maintained
 F:	drivers/net/ethernet/via/via-rhine.c
 
-VIAPRO SMBUS DRIVER
-M:	Jean Delvare <khali@linux-fr.org>
-L:	linux-i2c@vger.kernel.org
-S:	Maintained
-F:	Documentation/i2c/busses/i2c-viapro
-F:	drivers/i2c/busses/i2c-viapro.c
-
 VIA SD/MMC CARD CONTROLLER DRIVER
 M:	Bruce Chang <brucechang@via.com.tw>
 M:	Harald Welte <HaraldWelte@viatech.com>
@@ -8147,7 +8205,7 @@ F:	drivers/platform/x86
 
 X86 MCE INFRASTRUCTURE
 M:	Tony Luck <tony.luck@intel.com>
-M:	Borislav Petkov <bp@amd64.org>
+M:	Borislav Petkov <bp@alien8.de>
 L:	linux-edac@vger.kernel.org
 S:	Maintained
 F:	arch/x86/kernel/cpu/mcheck/*

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Terrified Chipmunk
 
 # *DOCUMENTATION*

+ 3 - 3
arch/alpha/kernel/osf_sys.c

@@ -445,7 +445,7 @@ struct procfs_args {
  * unhappy with OSF UFS. [CHECKME]
  */
 static int
-osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
+osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags)
 {
 	int retval;
 	struct cdfs_args tmp;
@@ -465,7 +465,7 @@ osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
 }
 
 static int
-osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
+osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags)
 {
 	int retval;
 	struct cdfs_args tmp;
@@ -485,7 +485,7 @@ osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
 }
 
 static int
-osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags)
+osf_procfs_mount(const char *dirname, struct procfs_args __user *args, int flags)
 {
 	struct procfs_args tmp;
 

+ 5 - 5
arch/arm/boot/Makefile

@@ -33,7 +33,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y)
 
 $(obj)/xipImage: vmlinux FORCE
 	$(call if_changed,objcopy)
-	$(kecho) '  Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
+	@$(kecho) '  Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
 
 $(obj)/Image $(obj)/zImage: FORCE
 	@echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
@@ -48,14 +48,14 @@ $(obj)/xipImage: FORCE
 
 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)
-	$(kecho) '  Kernel: $@ is ready'
+	@$(kecho) '  Kernel: $@ is ready'
 
 $(obj)/compressed/vmlinux: $(obj)/Image FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
 $(obj)/zImage:	$(obj)/compressed/vmlinux FORCE
 	$(call if_changed,objcopy)
-	$(kecho) '  Kernel: $@ is ready'
+	@$(kecho) '  Kernel: $@ is ready'
 
 endif
 
@@ -90,7 +90,7 @@ fi
 $(obj)/uImage:	$(obj)/zImage FORCE
 	@$(check_for_multiple_loadaddr)
 	$(call if_changed,uimage)
-	$(kecho) '  Image $@ is ready'
+	@$(kecho) '  Image $@ is ready'
 
 $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/bootp $@
@@ -98,7 +98,7 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
 
 $(obj)/bootpImage: $(obj)/bootp/bootp FORCE
 	$(call if_changed,objcopy)
-	$(kecho) '  Kernel: $@ is ready'
+	@$(kecho) '  Kernel: $@ is ready'
 
 PHONY += initrd FORCE
 initrd:

+ 2 - 2
arch/arm/boot/dts/tegra30.dtsi

@@ -73,8 +73,8 @@
 
 	pinmux: pinmux {
 		compatible = "nvidia,tegra30-pinmux";
-		reg = <0x70000868 0xd0    /* Pad control registers */
-		       0x70003000 0x3e0>; /* Mux registers */
+		reg = <0x70000868 0xd4    /* Pad control registers */
+		       0x70003000 0x3e4>; /* Mux registers */
 	};
 
 	serial@70006000 {

+ 1 - 1
arch/arm/mach-at91/at91rm9200_devices.c

@@ -68,7 +68,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
 
 	/* Enable overcurrent notification */
 	for (i = 0; i < data->ports; i++) {
-		if (data->overcurrent_pin[i])
+		if (gpio_is_valid(data->overcurrent_pin[i]))
 			at91_set_gpio_input(data->overcurrent_pin[i], 1);
 	}
 

+ 1 - 1
arch/arm/mach-at91/at91sam9260_devices.c

@@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
 
 	/* Enable overcurrent notification */
 	for (i = 0; i < data->ports; i++) {
-		if (data->overcurrent_pin[i])
+		if (gpio_is_valid(data->overcurrent_pin[i]))
 			at91_set_gpio_input(data->overcurrent_pin[i], 1);
 	}
 

+ 1 - 1
arch/arm/mach-at91/at91sam9261_devices.c

@@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
 
 	/* Enable overcurrent notification */
 	for (i = 0; i < data->ports; i++) {
-		if (data->overcurrent_pin[i])
+		if (gpio_is_valid(data->overcurrent_pin[i]))
 			at91_set_gpio_input(data->overcurrent_pin[i], 1);
 	}
 

+ 1 - 1
arch/arm/mach-at91/at91sam9263_devices.c

@@ -78,7 +78,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
 
 	/* Enable overcurrent notification */
 	for (i = 0; i < data->ports; i++) {
-		if (data->overcurrent_pin[i])
+		if (gpio_is_valid(data->overcurrent_pin[i]))
 			at91_set_gpio_input(data->overcurrent_pin[i], 1);
 	}
 

+ 6 - 6
arch/arm/mach-at91/at91sam9g45_devices.c

@@ -1841,8 +1841,8 @@ static struct resource sha_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
-		.start	= AT91SAM9G45_ID_AESTDESSHA,
-		.end	= AT91SAM9G45_ID_AESTDESSHA,
+		.start	= NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+		.end	= NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
 		.flags	= IORESOURCE_IRQ,
 	},
 };
@@ -1874,8 +1874,8 @@ static struct resource tdes_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
-		.start	= AT91SAM9G45_ID_AESTDESSHA,
-		.end	= AT91SAM9G45_ID_AESTDESSHA,
+		.start	= NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+		.end	= NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
 		.flags	= IORESOURCE_IRQ,
 	},
 };
@@ -1910,8 +1910,8 @@ static struct resource aes_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
-		.start	= AT91SAM9G45_ID_AESTDESSHA,
-		.end	= AT91SAM9G45_ID_AESTDESSHA,
+		.start	= NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+		.end	= NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
 		.flags	= IORESOURCE_IRQ,
 	},
 };

+ 1 - 2
arch/arm/mach-davinci/dm644x.c

@@ -713,8 +713,7 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type,
 		break;
 	case VPBE_ENC_CUSTOM_TIMINGS:
 		if (pclock <= 27000000) {
-			v |= DM644X_VPSS_MUXSEL_PLL2_MODE |
-			     DM644X_VPSS_DACCLKEN;
+			v |= DM644X_VPSS_DACCLKEN;
 			writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
 		} else {
 			/*

+ 3 - 0
arch/arm/mach-exynos/dma.c

@@ -275,6 +275,9 @@ static int __init exynos_dma_init(void)
 		exynos_pdma1_pdata.nr_valid_peri =
 			ARRAY_SIZE(exynos4210_pdma1_peri);
 		exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
+
+		if (samsung_rev() == EXYNOS4210_REV_0)
+			exynos_mdma1_device.res.start = EXYNOS4_PA_S_MDMA1;
 	} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
 		exynos_pdma0_pdata.nr_valid_peri =
 			ARRAY_SIZE(exynos4212_pdma0_peri);

+ 1 - 0
arch/arm/mach-exynos/include/mach/map.h

@@ -90,6 +90,7 @@
 
 #define EXYNOS4_PA_MDMA0		0x10810000
 #define EXYNOS4_PA_MDMA1		0x12850000
+#define EXYNOS4_PA_S_MDMA1		0x12840000
 #define EXYNOS4_PA_PDMA0		0x12680000
 #define EXYNOS4_PA_PDMA1		0x12690000
 #define EXYNOS5_PA_MDMA0		0x10800000

+ 2 - 1
arch/arm/mach-highbank/system.c

@@ -28,6 +28,7 @@ void highbank_restart(char mode, const char *cmd)
 		hignbank_set_pwr_soft_reset();
 
 	scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
-	cpu_do_idle();
+	while (1)
+		cpu_do_idle();
 }
 

+ 1 - 1
arch/arm/mach-imx/clk-gate2.c

@@ -112,7 +112,7 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
 
 	clk = clk_register(dev, &gate->hw);
 	if (IS_ERR(clk))
-		kfree(clk);
+		kfree(gate);
 
 	return clk;
 }

+ 1 - 1
arch/arm/mach-imx/ehci-imx25.c

@@ -30,7 +30,7 @@
 #define MX25_H1_SIC_SHIFT	21
 #define MX25_H1_SIC_MASK	(0x3 << MX25_H1_SIC_SHIFT)
 #define MX25_H1_PP_BIT		(1 << 18)
-#define MX25_H1_PM_BIT		(1 << 8)
+#define MX25_H1_PM_BIT		(1 << 16)
 #define MX25_H1_IPPUE_UP_BIT	(1 << 7)
 #define MX25_H1_IPPUE_DOWN_BIT	(1 << 6)
 #define MX25_H1_TLL_BIT		(1 << 5)

+ 1 - 1
arch/arm/mach-imx/ehci-imx35.c

@@ -30,7 +30,7 @@
 #define MX35_H1_SIC_SHIFT	21
 #define MX35_H1_SIC_MASK	(0x3 << MX35_H1_SIC_SHIFT)
 #define MX35_H1_PP_BIT		(1 << 18)
-#define MX35_H1_PM_BIT		(1 << 8)
+#define MX35_H1_PM_BIT		(1 << 16)
 #define MX35_H1_IPPUE_UP_BIT	(1 << 7)
 #define MX35_H1_IPPUE_DOWN_BIT	(1 << 6)
 #define MX35_H1_TLL_BIT		(1 << 5)

+ 5 - 0
arch/arm/mach-omap2/board-igep0020.c

@@ -579,6 +579,11 @@ static void __init igep_wlan_bt_init(void)
 	} else
 		return;
 
+	/* Make sure that the GPIO pins are muxed correctly */
+	omap_mux_init_gpio(igep_wlan_bt_gpios[0].gpio, OMAP_PIN_OUTPUT);
+	omap_mux_init_gpio(igep_wlan_bt_gpios[1].gpio, OMAP_PIN_OUTPUT);
+	omap_mux_init_gpio(igep_wlan_bt_gpios[2].gpio, OMAP_PIN_OUTPUT);
+
 	err = gpio_request_array(igep_wlan_bt_gpios,
 				 ARRAY_SIZE(igep_wlan_bt_gpios));
 	if (err) {

+ 1 - 1
arch/arm/mach-omap2/clockdomains44xx_data.c

@@ -359,7 +359,7 @@ static struct clockdomain iss_44xx_clkdm = {
 	.clkdm_offs	  = OMAP4430_CM2_CAM_CAM_CDOFFS,
 	.wkdep_srcs	  = iss_wkup_sleep_deps,
 	.sleepdep_srcs	  = iss_wkup_sleep_deps,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.flags		  = CLKDM_CAN_SWSUP,
 };
 
 static struct clockdomain l3_dss_44xx_clkdm = {

+ 20 - 14
arch/arm/mach-omap2/common-board-devices.c

@@ -63,30 +63,36 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
 	struct spi_board_info *spi_bi = &ads7846_spi_board_info;
 	int err;
 
-	err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
-	if (err) {
-		pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
-		return;
-	}
+	/*
+	 * If a board defines get_pendown_state() function, request the pendown
+	 * GPIO and set the GPIO debounce time.
+	 * If a board does not define the get_pendown_state() function, then
+	 * the ads7846 driver will setup the pendown GPIO itself.
+	 */
+	if (board_pdata && board_pdata->get_pendown_state) {
+		err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
+		if (err) {
+			pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
+			return;
+		}
+
+		if (gpio_debounce)
+			gpio_set_debounce(gpio_pendown, gpio_debounce);
 
-	if (gpio_debounce)
-		gpio_set_debounce(gpio_pendown, gpio_debounce);
+		gpio_export(gpio_pendown, 0);
+	}
 
 	spi_bi->bus_num	= bus_num;
 	spi_bi->irq	= gpio_to_irq(gpio_pendown);
 
+	ads7846_config.gpio_pendown = gpio_pendown;
+
 	if (board_pdata) {
 		board_pdata->gpio_pendown = gpio_pendown;
+		board_pdata->gpio_pendown_debounce = gpio_debounce;
 		spi_bi->platform_data = board_pdata;
-		if (board_pdata->get_pendown_state)
-			gpio_export(gpio_pendown, 0);
-	} else {
-		ads7846_config.gpio_pendown = gpio_pendown;
 	}
 
-	if (!board_pdata || (board_pdata && !board_pdata->get_pendown_state))
-		gpio_free(gpio_pendown);
-
 	spi_register_board_info(&ads7846_spi_board_info, 1);
 }
 #else

+ 79 - 0
arch/arm/mach-omap2/devices.c

@@ -19,6 +19,7 @@
 #include <linux/of.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/platform_data/omap4-keypad.h>
+#include <linux/platform_data/omap_ocp2scp.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -615,6 +616,83 @@ static void omap_init_vout(void)
 static inline void omap_init_vout(void) {}
 #endif
 
+#if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE)
+static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)
+{
+	int cnt	= 0;
+
+	while (ocp2scp_dev->drv_name != NULL) {
+		cnt++;
+		ocp2scp_dev++;
+	}
+
+	return cnt;
+}
+
+static void omap_init_ocp2scp(void)
+{
+	struct omap_hwmod	*oh;
+	struct platform_device	*pdev;
+	int			bus_id = -1, dev_cnt = 0, i;
+	struct omap_ocp2scp_dev	*ocp2scp_dev;
+	const char		*oh_name, *name;
+	struct omap_ocp2scp_platform_data *pdata;
+
+	if (!cpu_is_omap44xx())
+		return;
+
+	oh_name = "ocp2scp_usb_phy";
+	name	= "omap-ocp2scp";
+
+	oh = omap_hwmod_lookup(oh_name);
+	if (!oh) {
+		pr_err("%s: could not find omap_hwmod for %s\n", __func__,
+								oh_name);
+		return;
+	}
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		pr_err("%s: No memory for ocp2scp pdata\n", __func__);
+		return;
+	}
+
+	ocp2scp_dev = oh->dev_attr;
+	dev_cnt = count_ocp2scp_devices(ocp2scp_dev);
+
+	if (!dev_cnt) {
+		pr_err("%s: No devices connected to ocp2scp\n", __func__);
+		kfree(pdata);
+		return;
+	}
+
+	pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *)
+					* dev_cnt, GFP_KERNEL);
+	if (!pdata->devices) {
+		pr_err("%s: No memory for ocp2scp pdata devices\n", __func__);
+		kfree(pdata);
+		return;
+	}
+
+	for (i = 0; i < dev_cnt; i++, ocp2scp_dev++)
+		pdata->devices[i] = ocp2scp_dev;
+
+	pdata->dev_cnt	= dev_cnt;
+
+	pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL,
+								0, false);
+	if (IS_ERR(pdev)) {
+		pr_err("Could not build omap_device for %s %s\n",
+						name, oh_name);
+		kfree(pdata->devices);
+		kfree(pdata);
+		return;
+	}
+}
+#else
+static inline void omap_init_ocp2scp(void) { }
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 static int __init omap2_init_devices(void)
@@ -642,6 +720,7 @@ static int __init omap2_init_devices(void)
 	omap_init_sham();
 	omap_init_aes();
 	omap_init_vout();
+	omap_init_ocp2scp();
 
 	return 0;
 }

+ 49 - 14
arch/arm/mach-omap2/omap_hwmod.c

@@ -419,6 +419,38 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
 	return 0;
 }
 
+/**
+ * _wait_softreset_complete - wait for an OCP softreset to complete
+ * @oh: struct omap_hwmod * to wait on
+ *
+ * Wait until the IP block represented by @oh reports that its OCP
+ * softreset is complete.  This can be triggered by software (see
+ * _ocp_softreset()) or by hardware upon returning from off-mode (one
+ * example is HSMMC).  Waits for up to MAX_MODULE_SOFTRESET_WAIT
+ * microseconds.  Returns the number of microseconds waited.
+ */
+static int _wait_softreset_complete(struct omap_hwmod *oh)
+{
+	struct omap_hwmod_class_sysconfig *sysc;
+	u32 softrst_mask;
+	int c = 0;
+
+	sysc = oh->class->sysc;
+
+	if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
+		omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs)
+				   & SYSS_RESETDONE_MASK),
+				  MAX_MODULE_SOFTRESET_WAIT, c);
+	else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
+		softrst_mask = (0x1 << sysc->sysc_fields->srst_shift);
+		omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs)
+				    & softrst_mask),
+				  MAX_MODULE_SOFTRESET_WAIT, c);
+	}
+
+	return c;
+}
+
 /**
  * _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v
  * @oh: struct omap_hwmod *
@@ -1280,6 +1312,18 @@ static void _enable_sysc(struct omap_hwmod *oh)
 	if (!oh->class->sysc)
 		return;
 
+	/*
+	 * Wait until reset has completed, this is needed as the IP
+	 * block is reset automatically by hardware in some cases
+	 * (off-mode for example), and the drivers require the
+	 * IP to be ready when they access it
+	 */
+	if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+		_enable_optional_clocks(oh);
+	_wait_softreset_complete(oh);
+	if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+		_disable_optional_clocks(oh);
+
 	v = oh->_sysc_cache;
 	sf = oh->class->sysc->sysc_flags;
 
@@ -1802,7 +1846,7 @@ static int _am33xx_disable_module(struct omap_hwmod *oh)
  */
 static int _ocp_softreset(struct omap_hwmod *oh)
 {
-	u32 v, softrst_mask;
+	u32 v;
 	int c = 0;
 	int ret = 0;
 
@@ -1832,19 +1876,7 @@ static int _ocp_softreset(struct omap_hwmod *oh)
 	if (oh->class->sysc->srst_udelay)
 		udelay(oh->class->sysc->srst_udelay);
 
-	if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
-		omap_test_timeout((omap_hwmod_read(oh,
-						    oh->class->sysc->syss_offs)
-				   & SYSS_RESETDONE_MASK),
-				  MAX_MODULE_SOFTRESET_WAIT, c);
-	else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
-		softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
-		omap_test_timeout(!(omap_hwmod_read(oh,
-						     oh->class->sysc->sysc_offs)
-				   & softrst_mask),
-				  MAX_MODULE_SOFTRESET_WAIT, c);
-	}
-
+	c = _wait_softreset_complete(oh);
 	if (c == MAX_MODULE_SOFTRESET_WAIT)
 		pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
 			   oh->name, MAX_MODULE_SOFTRESET_WAIT);
@@ -2351,6 +2383,9 @@ static int __init _setup_reset(struct omap_hwmod *oh)
 	if (oh->_state != _HWMOD_STATE_INITIALIZED)
 		return -EINVAL;
 
+	if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK)
+		return -EPERM;
+
 	if (oh->rst_lines_cnt == 0) {
 		r = _enable(oh);
 		if (r) {

+ 6 - 0
arch/arm/mach-omap2/omap_hwmod.h

@@ -442,6 +442,11 @@ struct omap_hwmod_omap4_prcm {
  *     in order to complete the reset. Optional clocks will be disabled
  *     again after the reset.
  * HWMOD_16BIT_REG: Module has 16bit registers
+ * HWMOD_EXT_OPT_MAIN_CLK: The only main functional clock source for
+ *     this IP block comes from an off-chip source and is not always
+ *     enabled.  This prevents the hwmod code from being able to
+ *     enable and reset the IP block early.  XXX Eventually it should
+ *     be possible to query the clock framework for this information.
  */
 #define HWMOD_SWSUP_SIDLE			(1 << 0)
 #define HWMOD_SWSUP_MSTANDBY			(1 << 1)
@@ -452,6 +457,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_NO_IDLEST				(1 << 6)
 #define HWMOD_CONTROL_OPT_CLKS_IN_RESET		(1 << 7)
 #define HWMOD_16BIT_REG				(1 << 8)
+#define HWMOD_EXT_OPT_MAIN_CLK			(1 << 9)
 
 /*
  * omap_hwmod._int_flags definitions

+ 36 - 0
arch/arm/mach-omap2/omap_hwmod_44xx_data.c

@@ -25,6 +25,7 @@
 
 #include <plat-omap/dma-omap.h>
 
+#include <linux/platform_data/omap_ocp2scp.h>
 #include <linux/platform_data/spi-omap2-mcspi.h>
 #include <linux/platform_data/asoc-ti-mcbsp.h>
 #include <plat/dmtimer.h>
@@ -2126,6 +2127,14 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
 	.name		= "mcpdm",
 	.class		= &omap44xx_mcpdm_hwmod_class,
 	.clkdm_name	= "abe_clkdm",
+	/*
+	 * It's suspected that the McPDM requires an off-chip main
+	 * functional clock, controlled via I2C.  This IP block is
+	 * currently reset very early during boot, before I2C is
+	 * available, so it doesn't seem that we have any choice in
+	 * the kernel other than to avoid resetting it.
+	 */
+	.flags		= HWMOD_EXT_OPT_MAIN_CLK,
 	.mpu_irqs	= omap44xx_mcpdm_irqs,
 	.sdma_reqs	= omap44xx_mcpdm_sdma_reqs,
 	.main_clk	= "mcpdm_fck",
@@ -2682,6 +2691,32 @@ static struct omap_hwmod_class omap44xx_ocp2scp_hwmod_class = {
 	.sysc	= &omap44xx_ocp2scp_sysc,
 };
 
+/* ocp2scp dev_attr */
+static struct resource omap44xx_usb_phy_and_pll_addrs[] = {
+	{
+		.name		= "usb_phy",
+		.start		= 0x4a0ad080,
+		.end		= 0x4a0ae000,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		/* XXX: Remove this once control module driver is in place */
+		.name		= "ctrl_dev",
+		.start		= 0x4a002300,
+		.end		= 0x4a002303,
+		.flags		= IORESOURCE_MEM,
+	},
+	{ }
+};
+
+static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
+	{
+		.drv_name       = "omap-usb2",
+		.res		= omap44xx_usb_phy_and_pll_addrs,
+	},
+	{ }
+};
+
 /* ocp2scp_usb_phy */
 static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
 	.name		= "ocp2scp_usb_phy",
@@ -2695,6 +2730,7 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
 			.modulemode   = MODULEMODE_HWCTRL,
 		},
 	},
+	.dev_attr	= ocp2scp_dev_attr,
 };
 
 /*

+ 2 - 1
arch/arm/mach-omap2/twl-common.c

@@ -70,6 +70,7 @@ void __init omap4_pmic_init(const char *pmic_type,
 {
 	/* PMIC part*/
 	omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
+	omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
 	omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
 
 	/* Register additional devices on i2c1 bus if needed */
@@ -363,7 +364,7 @@ static struct regulator_init_data omap4_clk32kg_idata = {
 };
 
 static struct regulator_consumer_supply omap4_vdd1_supply[] = {
-	REGULATOR_SUPPLY("vcc", "mpu.0"),
+	REGULATOR_SUPPLY("vcc", "cpu0"),
 };
 
 static struct regulator_consumer_supply omap4_vdd2_supply[] = {

+ 1 - 1
arch/arm/mach-omap2/vc.c

@@ -264,7 +264,7 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
 
 	if (initialized) {
 		if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
-			pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
+			pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).\n",
 				__func__, voltdm->name, i2c_high_speed);
 		return;
 	}

+ 7 - 1
arch/arm/mach-pxa/hx4700.c

@@ -28,6 +28,7 @@
 #include <linux/mfd/asic3.h>
 #include <linux/mtd/physmap.h>
 #include <linux/pda_power.h>
+#include <linux/pwm.h>
 #include <linux/pwm_backlight.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/gpio-regulator.h>
@@ -556,7 +557,7 @@ static struct platform_device hx4700_lcd = {
  */
 
 static struct platform_pwm_backlight_data backlight_data = {
-	.pwm_id         = 1,
+	.pwm_id         = -1,	/* Superseded by pwm_lookup */
 	.max_brightness = 200,
 	.dft_brightness = 100,
 	.pwm_period_ns  = 30923,
@@ -571,6 +572,10 @@ static struct platform_device backlight = {
 	},
 };
 
+static struct pwm_lookup hx4700_pwm_lookup[] = {
+	PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL),
+};
+
 /*
  * USB "Transceiver"
  */
@@ -872,6 +877,7 @@ static void __init hx4700_init(void)
 	pxa_set_stuart_info(NULL);
 
 	platform_add_devices(devices, ARRAY_SIZE(devices));
+	pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
 
 	pxa_set_ficp_info(&ficp_info);
 	pxa27x_set_i2c_power_info(NULL);

+ 2 - 6
arch/arm/mach-pxa/spitz_pm.c

@@ -86,10 +86,7 @@ static void spitz_discharge1(int on)
 	gpio_set_value(SPITZ_GPIO_LED_GREEN, on);
 }
 
-static unsigned long gpio18_config[] = {
-	GPIO18_RDY,
-	GPIO18_GPIO,
-};
+static unsigned long gpio18_config = GPIO18_GPIO;
 
 static void spitz_presuspend(void)
 {
@@ -112,7 +109,7 @@ static void spitz_presuspend(void)
 	PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
 	PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0);
 
-	pxa2xx_mfp_config(&gpio18_config[0], 1);
+	pxa2xx_mfp_config(&gpio18_config, 1);
 	gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown");
 	gpio_free(18);
 
@@ -131,7 +128,6 @@ static void spitz_presuspend(void)
 
 static void spitz_postsuspend(void)
 {
-	pxa2xx_mfp_config(&gpio18_config[1], 1);
 }
 
 static int spitz_should_wakeup(unsigned int resume_on_alarm)

+ 1 - 1
arch/arm/tools/Makefile

@@ -5,6 +5,6 @@
 #
 
 include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
-	$(kecho) '  Generating $@'
+	@$(kecho) '  Generating $@'
 	@mkdir -p $(dir $@)
 	$(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }

+ 1 - 1
arch/arm64/include/asm/io.h

@@ -222,7 +222,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
 
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))

+ 4 - 2
arch/arm64/include/asm/pgtable-hwdef.h

@@ -38,7 +38,8 @@
 #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_NG		(_AT(pmdval_t, 1) << 11)
-#define PMD_SECT_XN		(_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 53)
+#define PMD_SECT_UXN		(_AT(pmdval_t, 1) << 54)
 
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -57,7 +58,8 @@
 #define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
 #define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
 #define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
-#define PTE_XN			(_AT(pteval_t, 1) << 54)	/* XN */
+#define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
+#define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
 
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).

+ 20 - 20
arch/arm64/include/asm/pgtable.h

@@ -62,23 +62,23 @@ extern pgprot_t pgprot_default;
 
 #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE		_MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_DIRTY)
-
-#define __PAGE_NONE		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
+#define PAGE_NONE		_MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
+#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+
+#define __PAGE_NONE		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
 
 #endif /* __ASSEMBLY__ */
 
@@ -130,10 +130,10 @@ extern struct page *empty_zero_page;
 #define pte_young(pte)		(pte_val(pte) & PTE_AF)
 #define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
 #define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
-#define pte_exec(pte)		(!(pte_val(pte) & PTE_XN))
+#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
 #define pte_present_exec_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \
+	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
 	 (PTE_VALID | PTE_USER))
 
 #define PTE_BIT_FUNC(fn,op) \
@@ -262,7 +262,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY;
+	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }

+ 0 - 1
arch/ia64/mm/init.c

@@ -637,7 +637,6 @@ mem_init (void)
 
 	high_memory = __va(max_low_pfn * PAGE_SIZE);
 
-	reset_zone_present_pages();
 	for_each_online_pgdat(pgdat)
 		if (pgdat->bdata->node_bootmem_map)
 			totalram_pages += free_all_bootmem_node(pgdat);

+ 3 - 3
arch/m68k/include/asm/signal.h

@@ -41,7 +41,7 @@ struct k_sigaction {
 static inline void sigaddset(sigset_t *set, int _sig)
 {
 	asm ("bfset %0{%1,#1}"
-		: "+od" (*set)
+		: "+o" (*set)
 		: "id" ((_sig - 1) ^ 31)
 		: "cc");
 }
@@ -49,7 +49,7 @@ static inline void sigaddset(sigset_t *set, int _sig)
 static inline void sigdelset(sigset_t *set, int _sig)
 {
 	asm ("bfclr %0{%1,#1}"
-		: "+od" (*set)
+		: "+o" (*set)
 		: "id" ((_sig - 1) ^ 31)
 		: "cc");
 }
@@ -65,7 +65,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
 	int ret;
 	asm ("bfextu %1{%2,#1},%0"
 		: "=d" (ret)
-		: "od" (*set), "id" ((_sig-1) ^ 31)
+		: "o" (*set), "id" ((_sig-1) ^ 31)
 		: "cc");
 	return ret;
 }

+ 1 - 0
arch/mips/cavium-octeon/executive/cvmx-l2c.c

@@ -30,6 +30,7 @@
  * measurement, and debugging facilities.
  */
 
+#include <linux/irqflags.h>
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-l2c.h>
 #include <asm/octeon/cvmx-spinlock.h>

+ 1 - 0
arch/mips/fw/arc/misc.c

@@ -11,6 +11,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/irqflags.h>
 
 #include <asm/bcache.h>
 

+ 39 - 89
arch/mips/include/asm/bitops.h

@@ -14,7 +14,6 @@
 #endif
 
 #include <linux/compiler.h>
-#include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/byteorder.h>		/* sigh ... */
@@ -44,6 +43,24 @@
 #define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()	smp_llsc_mb()
 
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+			      volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+			       volatile unsigned long *addr);
+
+
 /*
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -57,7 +74,7 @@
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_set_bit(nr, addr);
 }
 
 /*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (~(1UL << bit)));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_clear_bit(nr, addr);
 }
 
 /*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
  */
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 			: "=&r" (temp), "+m" (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		__mips_change_bit(nr, addr);
 }
 
 /*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 static inline int test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
 static inline int test_and_set_bit_lock(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a |= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_set_bit_lock(nr, addr);
 
 	smp_llsc_mb();
 
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 static inline int test_and_clear_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a &= ~mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_clear_bit(nr, addr);
 
 	smp_llsc_mb();
 
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 static inline int test_and_change_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
-	unsigned short bit = nr & SZLONG_MASK;
+	int bit = nr & SZLONG_MASK;
 	unsigned long res;
 
 	smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
 		} while (unlikely(!res));
 
 		res = temp & (1UL << bit);
-	} else {
-		volatile unsigned long *a = addr;
-		unsigned long mask;
-		unsigned long flags;
-
-		a += nr >> SZLONG_LOG;
-		mask = 1UL << bit;
-		raw_local_irq_save(flags);
-		res = (mask & *a);
-		*a ^= mask;
-		raw_local_irq_restore(flags);
-	}
+	} else
+		res = __mips_test_and_change_bit(nr, addr);
 
 	smp_llsc_mb();
 

+ 1 - 1
arch/mips/include/asm/compat.h

@@ -290,7 +290,7 @@ struct compat_shmid64_ds {
 
 static inline int is_compat_task(void)
 {
-	return test_thread_flag(TIF_32BIT);
+	return test_thread_flag(TIF_32BIT_ADDR);
 }
 
 #endif /* _ASM_COMPAT_H */

+ 1 - 0
arch/mips/include/asm/io.h

@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
 #include <asm/bug.h>

+ 75 - 132
arch/mips/include/asm/irqflags.h

@@ -16,83 +16,13 @@
 #include <linux/compiler.h>
 #include <asm/hazards.h>
 
-__asm__(
-	"	.macro	arch_local_irq_enable				\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
-	"	ori	$1, 0x400					\n"
-	"	xori	$1, 0x400					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
-	"	ei							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1e						\n"
-	"	mtc0	$1,$12						\n"
-#endif
-	"	irq_enable_hazard					\n"
-	"	.set	pop						\n"
-	"	.endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
 __asm__(
 	"	.macro	arch_local_irq_disable\n"
 	"	.set	push						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1					\n"
-	"	ori	$1, 0x400					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di							\n"
-#else
-	"	mfc0	$1,$12						\n"
-	"	ori	$1,0x1f						\n"
-	"	xori	$1,0x1f						\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1,$12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
 		: "memory");
 }
 
-__asm__(
-	"	.macro	arch_local_save_flags flags			\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1					\n"
-#else
-	"	mfc0	\\flags, $12					\n"
-#endif
-	"	.set	pop						\n"
-	"	.endm							\n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
-	return flags;
-}
 
 __asm__(
 	"	.macro	arch_local_irq_save result			\n"
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1					\n"
-	"	ori	$1, \\result, 0x400				\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $2, 1					\n"
-	"	andi	\\result, \\result, 0x400			\n"
-#elif defined(CONFIG_CPU_MIPSR2)
 	"	di	\\result					\n"
 	"	andi	\\result, 1					\n"
-#else
-	"	mfc0	\\result, $12					\n"
-	"	ori	$1, \\result, 0x1f				\n"
-	"	xori	$1, 0x1f					\n"
-	"	.set	noreorder					\n"
-	"	mtc0	$1, $12						\n"
-#endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
 	return flags;
 }
 
+
 __asm__(
 	"	.macro	arch_local_irq_restore flags			\n"
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1						\n"
-	"andi	\\flags, 0x400						\n"
-	"ori	$1, 0x400						\n"
-	"xori	$1, 0x400						\n"
-	"or	\\flags, $1						\n"
-	"mtc0	\\flags, $2, 1						\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
 	/*
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
 	"	beqz	\\flags, 1f					\n"
-	"	 di							\n"
+	"	di							\n"
 	"	ei							\n"
 	"1:								\n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
 	/*
 	 * Fast, dangerous.  Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
 	"	ins	$1, \\flags, 0, 1				\n"
 	"	mtc0	$1, $12						\n"
-#else
-	"	mfc0	$1, $12						\n"
-	"	andi	\\flags, 1					\n"
-	"	ori	$1, 0x1f					\n"
-	"	xori	$1, 0x1f					\n"
-	"	or	\\flags, $1					\n"
-	"	mtc0	\\flags, $12					\n"
 #endif
 	"	irq_disable_hazard					\n"
 	"	.set	pop						\n"
 	"	.endm							\n");
 
-
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-
 	__asm__ __volatile__(
 		"arch_local_irq_restore\t%0"
 		: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
 		: "0" (flags)
 		: "memory");
 }
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+	"	.macro	arch_local_irq_enable				\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	mtc0	$1, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	"	ei							\n"
+#else
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1e						\n"
+	"	mtc0	$1,$12						\n"
+#endif
+	"	irq_enable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
+		"arch_local_irq_enable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+}
+
+
+__asm__(
+	"	.macro	arch_local_save_flags flags			\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\flags, $2, 1					\n"
+#else
+	"	mfc0	\\flags, $12					\n"
+#endif
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
+	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+	return flags;
+}
+
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 #endif
 }
 
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
 
 /*
  * Do the CPU's IRQ-state tracing from assembly code.
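
Note: the flag value passed between these macros is easy to lose track of in the diff. The following user-space sketch (an illustration only, not kernel code; all names are invented for the example) models what the remaining MIPSR2 inline path does with the Status.IE bit: arch_local_irq_save() returns the old IE bit in bit 0 of flags, and arch_local_irq_restore() writes that bit back.

	#include <assert.h>

	static unsigned int status_ie;	/* stands in for CP0 Status.IE */

	static unsigned long model_irq_save(void)
	{
		unsigned long flags = status_ie & 1;	/* "di %0; andi %0, 1" */
		status_ie = 0;				/* interrupts now off */
		return flags;
	}

	static void model_irq_restore(unsigned long flags)
	{
		status_ie = flags & 1;	/* the "ins $1, \flags, 0, 1" path */
	}

	int main(void)
	{
		unsigned long f;

		status_ie = 1;
		f = model_irq_save();		/* f == 1, IRQs masked */
		assert(status_ie == 0);
		model_irq_restore(f);		/* previous state comes back */
		assert(status_ie == 1);
		return 0;
	}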

+ 0 - 6
arch/mips/include/asm/thread_info.h

@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */
 
-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)

+ 20 - 6
arch/mips/kernel/setup.c

@@ -79,7 +79,7 @@ static struct resource data_resource = { .name = "Kernel data", };
 void __init add_memory_region(phys_t start, phys_t size, long type)
 {
 	int x = boot_mem_map.nr_map;
-	struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;
+	int i;
 
 	/* Sanity check */
 	if (start + size < start) {
@@ -88,15 +88,29 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
 	}
 
 	/*
-	 * Try to merge with previous entry if any.  This is far less than
-	 * perfect but is sufficient for most real world cases.
+	 * Try to merge with existing entry, if any.
 	 */
-	if (x && prev->addr + prev->size == start && prev->type == type) {
-		prev->size += size;
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
+		unsigned long top;
+
+		if (entry->type != type)
+			continue;
+
+		if (start + size < entry->addr)
+			continue;			/* no overlap */
+
+		if (entry->addr + entry->size < start)
+			continue;			/* no overlap */
+
+		top = max(entry->addr + entry->size, start + size);
+		entry->addr = min(entry->addr, start);
+		entry->size = top - entry->addr;
+
 		return;
 	}
 
-	if (x == BOOT_MEM_MAP_MAX) {
+	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
 		pr_err("Ooops! Too many entries in the memory map!\n");
 		return;
 	}
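
Note: the rewritten loop above generalises the old "merge with the previous entry only" behaviour to a full interval union over entries of the same type. A stand-alone sketch of the same min/max arithmetic, with made-up example values (not kernel code):

	#include <stdio.h>

	int main(void)
	{
		/* existing entry: [0x01000000, 0x01400000) */
		unsigned long entry_addr = 0x01000000, entry_size = 0x00400000;
		/* new region:    [0x01200000, 0x01600000), overlaps the entry */
		unsigned long start = 0x01200000, size = 0x00400000;

		unsigned long top = entry_addr + entry_size;
		if (start + size > top)
			top = start + size;	/* max of the two end addresses */
		if (start < entry_addr)
			entry_addr = start;	/* min of the two start addresses */
		entry_size = top - entry_addr;

		/* prints: addr=0x1000000 size=0x600000 */
		printf("addr=%#lx size=%#lx\n", entry_addr, entry_size);
		return 0;
	}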

+ 3 - 2
arch/mips/lib/Makefile

@@ -2,8 +2,9 @@
 # Makefile for MIPS-specific library files.
 #
 
-lib-y	+= csum_partial.o delay.o memcpy.o memset.o \
-	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
+	   mips-atomic.o strlen_user.o strncpy_user.o \
+	   strnlen_user.o uncached.o
 
 obj-y			+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o

+ 179 - 0
arch/mips/lib/bitops.c

@@ -0,0 +1,179 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/irqflags.h>
+#include <linux/export.h>
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory.  This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory.  This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory.  This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit - Set a bit and return its old value.  This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit(unsigned long nr,
+			    volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value.  This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+				 volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a |= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value.  This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a &= ~mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value.  This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr;
+	unsigned bit = nr & SZLONG_MASK;
+	unsigned long mask;
+	unsigned long flags;
+	unsigned long res;
+
+	a += nr >> SZLONG_LOG;
+	mask = 1UL << bit;
+	raw_local_irq_save(flags);
+	res = (mask & *a);
+	*a ^= mask;
+	raw_local_irq_restore(flags);
+	return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
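
Note: every helper above locates the target word and mask the same way. For readers without the MIPS headers handy, a 64-bit build uses SZLONG_LOG == 6 and SZLONG_MASK == 63 (32-bit builds use 5 and 31), so the addressing reduces to the following stand-alone sketch (illustrative only, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long nr = 200;			/* arbitrary example bit number */
		unsigned long word = nr >> 6;		/* nr >> SZLONG_LOG */
		unsigned long mask = 1UL << (nr & 63);	/* 1UL << (nr & SZLONG_MASK) */

		/* prints: word 3, mask 0x100 */
		printf("word %lu, mask %#lx\n", word, mask);
		return 0;
	}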

+ 176 - 0
arch/mips/lib/mips-atomic.c

@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, manipulating only the IE bit is not enough.
+ *
+ * If an mfc0 $12 follows a store and the mfc0 is the last instruction of
+ * a page, and fetching the next instruction causes a TLB miss, the
+ * result of the mfc0 may wrongly have the EXL bit set.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask the EXL bit of the result, or place a nop before the mfc0.
+ */
+__asm__(
+	"	.macro	arch_local_irq_disable\n"
+	"	.set	push						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1					\n"
+	"	ori	$1, 0x400					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1f						\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1,$12						\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+notrace void arch_local_irq_disable(void)
+{
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_disable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+	"	.macro	arch_local_irq_save result			\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\result, $2, 1					\n"
+	"	ori	$1, \\result, 0x400				\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $2, 1					\n"
+	"	andi	\\result, \\result, 0x400			\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	\\result, $12					\n"
+	"	ori	$1, \\result, 0x1f				\n"
+	"	xori	$1, 0x1f					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $12						\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	preempt_disable();
+	asm volatile("arch_local_irq_save\t%0"
+		     : "=r" (flags)
+		     : /* no inputs */
+		     : "memory");
+	preempt_enable();
+	return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+	"	.macro	arch_local_irq_restore flags			\n"
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"mfc0	$1, $2, 1						\n"
+	"andi	\\flags, 0x400						\n"
+	"ori	$1, 0x400						\n"
+	"xori	$1, 0x400						\n"
+	"or	\\flags, $1						\n"
+	"mtc0	\\flags, $2, 1						\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12						\n"
+	"	andi	\\flags, 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	\\flags, $1					\n"
+	"	mtc0	\\flags, $12					\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+notrace void __arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */

+ 2 - 1
arch/mips/mti-malta/malta-platform.c

@@ -29,6 +29,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
+#include <asm/mips-boards/maltaint.h>
 #include <mtd/mtd-abi.h>
 
 #define SMC_PORT(base, int)						\
@@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = {
 	SMC_PORT(0x2F8, 3),
 	{
 		.mapbase	= 0x1f000900,	/* The CBUS UART */
-		.irq		= MIPS_CPU_IRQ_BASE + 2,
+		.irq		= MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
 		.uartclk	= 3686400,	/* Twice the usual clk! */
 		.iotype		= UPIO_MEM32,
 		.flags		= CBUS_UART_FLAGS,

+ 4 - 2
arch/parisc/kernel/signal32.c

@@ -65,7 +65,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
 {
 	compat_sigset_t s;
 
-	if (sz != sizeof *set) panic("put_sigset32()");
+	if (sz != sizeof *set)
+		return -EINVAL;
 	sigset_64to32(&s, set);
 
 	return copy_to_user(up, &s, sizeof s);
@@ -77,7 +78,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
 	compat_sigset_t s;
 	int r;
 
-	if (sz != sizeof *set) panic("put_sigset32()");
+	if (sz != sizeof *set)
+		return -EINVAL;
 
 	if ((r = copy_from_user(&s, up, sz)) == 0) {
 		sigset_32to64(set, &s);

+ 2 - 0
arch/parisc/kernel/sys_parisc.c

@@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping,
 	struct vm_area_struct *vma;
 	int offset = mapping ? get_offset(mapping) : 0;
 
+	offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
+
 	addr = DCACHE_ALIGN(addr - offset) + offset;
 
 	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
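
Note: the added line folds the file offset (pgoff) into the per-mapping color offset, so a given file page tends to land at the same alignment in every process that maps it, which is the usual concern behind get_shared_area() on parisc's virtually-indexed caches. A small sketch of the arithmetic, assuming the common PAGE_SHIFT of 12 (an assumption for illustration, not taken from this diff):

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed value for the example */

	int main(void)
	{
		unsigned long mapping_offset = 0x00002000;	/* per-mapping color */
		unsigned long pgoff = 5;			/* page index within the file */

		unsigned long offset =
			(mapping_offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;

		/* 0x2000 + 0x5000 = 0x7000, below the 4 MB mask: prints 0x7000 */
		printf("color offset = %#lx\n", offset);
		return 0;
	}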

+ 6 - 0
arch/powerpc/boot/dts/mpc5200b.dtsi

@@ -231,6 +231,12 @@
 			interrupts = <2 7 0>;
 		};
 
+		sclpc@3c00 {
+			compatible = "fsl,mpc5200-lpbfifo";
+			reg = <0x3c00 0x60>;
+			interrupts = <2 23 0>;
+		};
+
 		i2c@3d00 {
 			#address-cells = <1>;
 			#size-cells = <0>;

+ 0 - 6
arch/powerpc/boot/dts/o2d.dtsi

@@ -86,12 +86,6 @@
 				reg = <0>;
 			};
 		};
-
-		sclpc@3c00 {
-			compatible = "fsl,mpc5200-lpbfifo";
-			reg = <0x3c00 0x60>;
-			interrupts = <3 23 0>;
-		};
 	};
 
 	localbus {

+ 6 - 1
arch/powerpc/boot/dts/pcm030.dts

@@ -59,7 +59,7 @@
 			#gpio-cells = <2>;
 		};
 
-		psc@2000 { /* PSC1 in ac97 mode */
+		audioplatform: psc@2000 { /* PSC1 in ac97 mode */
 			compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97";
 			cell-index = <0>;
 		};
@@ -134,4 +134,9 @@
 	localbus {
 		status = "disabled";
 	};
+
+	sound {
+		compatible = "phytec,pcm030-audio-fabric";
+		asoc-platform = <&audioplatform>;
+	};
 };

+ 5 - 4
arch/powerpc/platforms/52xx/mpc52xx_pic.c

@@ -372,10 +372,11 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
 	case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break;
 	case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
 	case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
-	default:
-		pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n",
-		       __func__, virq, l1irq, l2irq);
-		return -EINVAL;
+	case MPC52xx_IRQ_L1_CRIT:
+		pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
+			__func__, l2irq);
+		irq_set_chip(virq, &no_irq_chip);
+		return 0;
 	}
 
 	irq_set_chip_and_handler(virq, irqchip, handle_level_irq);

+ 1 - 1
arch/powerpc/platforms/pseries/eeh_pe.c

@@ -449,7 +449,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
 			if (list_empty(&pe->edevs)) {
 				cnt = 0;
 				list_for_each_entry(child, &pe->child_list, child) {
-					if (!(pe->type & EEH_PE_INVALID)) {
+					if (!(child->type & EEH_PE_INVALID)) {
 						cnt++;
 						break;
 					}

+ 2 - 1
arch/powerpc/platforms/pseries/msi.c

@@ -220,7 +220,8 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
 
 	/* Get the top level device in the PE */
 	edev = of_node_to_eeh_dev(dn);
-	edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
+	if (edev->pe)
+		edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
 	dn = eeh_dev_to_of_node(edev);
 	if (!dn)
 		return NULL;

+ 1 - 0
arch/s390/Kconfig

@@ -96,6 +96,7 @@ config S390
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_CMPXCHG_DOUBLE
+	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select VIRT_CPU_ACCOUNTING
 	select ARCH_DISCARD_MEMBLOCK

+ 1 - 1
arch/s390/include/asm/compat.h

@@ -20,7 +20,7 @@
 #define PSW32_MASK_CC		0x00003000UL
 #define PSW32_MASK_PM		0x00000f00UL
 
-#define PSW32_MASK_USER		0x00003F00UL
+#define PSW32_MASK_USER		0x0000FF00UL
 
 #define PSW32_ADDR_AMODE	0x80000000UL
 #define PSW32_ADDR_INSN		0x7FFFFFFFUL

+ 3 - 0
arch/s390/include/asm/topology.h

@@ -8,6 +8,9 @@ struct cpu;
 
 #ifdef CONFIG_SCHED_BOOK
 
+extern unsigned char cpu_socket_id[NR_CPUS];
+#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
+
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 

+ 2 - 2
arch/s390/include/uapi/asm/ptrace.h

@@ -239,7 +239,7 @@ typedef struct
 #define PSW_MASK_EA		0x00000000UL
 #define PSW_MASK_BA		0x00000000UL
 
-#define PSW_MASK_USER		0x00003F00UL
+#define PSW_MASK_USER		0x0000FF00UL
 
 #define PSW_ADDR_AMODE		0x80000000UL
 #define PSW_ADDR_INSN		0x7FFFFFFFUL
@@ -269,7 +269,7 @@ typedef struct
 #define PSW_MASK_EA		0x0000000100000000UL
 #define PSW_MASK_BA		0x0000000080000000UL
 
-#define PSW_MASK_USER		0x00003F8180000000UL
+#define PSW_MASK_USER		0x0000FF8180000000UL
 
 #define PSW_ADDR_AMODE		0x0000000000000000UL
 #define PSW_ADDR_INSN		0xFFFFFFFFFFFFFFFFUL

+ 12 - 2
arch/s390/kernel/compat_signal.c

@@ -309,6 +309,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
 		(__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
 		(__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
+	/* Check for invalid user address space control. */
+	if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
+		regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+			(regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
 	for (i = 0; i < NUM_GPRS; i++)
 		regs->gprs[i] = (__u64) regs32.gprs[i];
@@ -481,7 +485,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 
 	/* Set up registers for signal handler */
 	regs->gprs[15] = (__force __u64) frame;
-	regs->psw.mask |= PSW_MASK_BA;		/* force amode 31 */
+	/* Force 31 bit amode and default user address space control. */
+	regs->psw.mask = PSW_MASK_BA |
+		(psw_user_bits & PSW_MASK_ASC) |
+		(regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (__force __u64) ka->sa.sa_handler;
 
 	regs->gprs[2] = map_signal(sig);
@@ -549,7 +556,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
 
 	/* Set up registers for signal handler */
 	regs->gprs[15] = (__force __u64) frame;
-	regs->psw.mask |= PSW_MASK_BA;		/* force amode 31 */
+	/* Force 31 bit amode and default user address space control. */
+	regs->psw.mask = PSW_MASK_BA |
+		(psw_user_bits & PSW_MASK_ASC) |
+		(regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (__u64) ka->sa.sa_handler;
 
 	regs->gprs[2] = map_signal(sig);

+ 12 - 2
arch/s390/kernel/signal.c

@@ -136,6 +136,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 	/* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
 	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
 		(user_sregs.regs.psw.mask & PSW_MASK_USER);
+	/* Check for invalid user address space control. */
+	if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
+		regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+			(regs->psw.mask & ~PSW_MASK_ASC);
 	/* Check for invalid amode */
 	if (regs->psw.mask & PSW_MASK_EA)
 		regs->psw.mask |= PSW_MASK_BA;
@@ -273,7 +277,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 
 	/* Set up registers for signal handler */
 	regs->gprs[15] = (unsigned long) frame;
-	regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA;	/* 64 bit amode */
+	/* Force default amode and default user address space control. */
+	regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
+		(psw_user_bits & PSW_MASK_ASC) |
+		(regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
 
 	regs->gprs[2] = map_signal(sig);
@@ -346,7 +353,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 
 	/* Set up registers for signal handler */
 	regs->gprs[15] = (unsigned long) frame;
-	regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA;	/* 64 bit amode */
+	/* Force default amode and default user address space control. */
+	regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
+		(psw_user_bits & PSW_MASK_ASC) |
+		(regs->psw.mask & ~PSW_MASK_ASC);
 	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
 
 	regs->gprs[2] = map_signal(sig);

+ 4 - 2
arch/s390/kernel/topology.c

@@ -40,6 +40,7 @@ static DEFINE_SPINLOCK(topology_lock);
 static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
+unsigned char cpu_socket_id[NR_CPUS];
 
 static struct mask_info book_info;
 cpumask_t cpu_book_map[NR_CPUS];
@@ -83,11 +84,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 			cpumask_set_cpu(lcpu, &book->mask);
 			cpu_book_id[lcpu] = book->id;
 			cpumask_set_cpu(lcpu, &core->mask);
+			cpu_core_id[lcpu] = rcpu;
 			if (one_core_per_cpu) {
-				cpu_core_id[lcpu] = rcpu;
+				cpu_socket_id[lcpu] = rcpu;
 				core = core->next;
 			} else {
-				cpu_core_id[lcpu] = core->id;
+				cpu_socket_id[lcpu] = core->id;
 			}
 			smp_cpu_set_polarization(lcpu, tl_cpu->pp);
 		}

+ 2 - 3
arch/s390/mm/gup.c

@@ -180,8 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
 	end = start + len;
-	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-					(void __user *)start, len)))
+	if ((end < start) || (end > TASK_SIZE))
 		return 0;
 
 	local_irq_save(flags);
@@ -229,7 +228,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
 	end = start + len;
-	if (end < start)
+	if ((end < start) || (end > TASK_SIZE))
 		goto slow_irqon;
 
 	/*

+ 4 - 1
arch/sparc/include/asm/prom.h

@@ -63,10 +63,13 @@ extern char *of_console_options;
 extern void irq_trans_init(struct device_node *dp);
 extern char *build_path_component(struct device_node *dp);
 
-/* SPARC has a local implementation */
+/* SPARC has local implementations */
 extern int of_address_to_resource(struct device_node *dev, int index,
 				  struct resource *r);
 #define of_address_to_resource of_address_to_resource
 
+void __iomem *of_iomap(struct device_node *node, int index);
+#define of_iomap of_iomap
+
 #endif /* __KERNEL__ */
 #endif /* _SPARC_PROM_H */

+ 1 - 3
arch/sparc/kernel/signal_64.c

@@ -295,9 +295,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
 		err |= restore_fpu_state(regs, fpu_save);
 
 	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
-	err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
-
-	if (err)
+	if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
 		goto segv;
 
 	err |= __get_user(rwin_save, &sf->rwin_save);

+ 6 - 1
arch/unicore32/Kconfig

@@ -16,6 +16,8 @@ config UNICORE32
 	select ARCH_WANT_FRAME_POINTERS
 	select GENERIC_IOMAP
 	select MODULES_USE_ELF_REL
+	select GENERIC_KERNEL_THREAD
+	select GENERIC_KERNEL_EXECVE
 	help
 	  UniCore-32 is a 32-bit Instruction Set Architecture,
 	  including a series of low-power-consumption RISC chip
@@ -64,6 +66,9 @@ config GENERIC_CALIBRATE_DELAY
 config ARCH_MAY_HAVE_PC_FDC
 	bool
 
+config ZONE_DMA
+	def_bool y
+
 config NEED_DMA_MAP_STATE
        def_bool y
 
@@ -216,7 +221,7 @@ config PUV3_GPIO
 	bool
 	depends on !ARCH_FPGA
 	select GENERIC_GPIO
-	select GPIO_SYSFS if EXPERIMENTAL
+	select GPIO_SYSFS
 	default y
 
 if PUV3_NB0916

+ 0 - 1
arch/unicore32/include/asm/Kbuild

@@ -1,4 +1,3 @@
-include include/asm-generic/Kbuild.asm
 
 generic-y += atomic.h
 generic-y += auxvec.h

+ 0 - 5
arch/unicore32/include/asm/bug.h

@@ -19,9 +19,4 @@ extern void die(const char *msg, struct pt_regs *regs, int err);
 extern void uc32_notify_die(const char *str, struct pt_regs *regs,
 		struct siginfo *info, unsigned long err, unsigned long trap);
 
-extern asmlinkage void __backtrace(void);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
-
-extern void __show_regs(struct pt_regs *);
-
 #endif /* __UNICORE_BUG_H__ */

+ 1 - 1
arch/unicore32/include/asm/cmpxchg.h

@@ -35,7 +35,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 			: "memory", "cc");
 		break;
 	default:
-		ret = __xchg_bad_pointer();
+		__xchg_bad_pointer();
 	}
 
 	return ret;

+ 0 - 1
arch/unicore32/include/asm/kvm_para.h

@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>

+ 0 - 5
arch/unicore32/include/asm/processor.h

@@ -72,11 +72,6 @@ unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax()			barrier()
 
-/*
- * Create a new kernel thread
- */
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 

+ 1 - 75
arch/unicore32/include/asm/ptrace.h

@@ -12,80 +12,10 @@
 #ifndef __UNICORE_PTRACE_H__
 #define __UNICORE_PTRACE_H__
 
-#define PTRACE_GET_THREAD_AREA	22
-
-/*
- * PSR bits
- */
-#define USER_MODE	0x00000010
-#define REAL_MODE	0x00000011
-#define INTR_MODE	0x00000012
-#define PRIV_MODE	0x00000013
-#define ABRT_MODE	0x00000017
-#define EXTN_MODE	0x0000001b
-#define SUSR_MODE	0x0000001f
-#define MODE_MASK	0x0000001f
-#define PSR_R_BIT	0x00000040
-#define PSR_I_BIT	0x00000080
-#define PSR_V_BIT	0x10000000
-#define PSR_C_BIT	0x20000000
-#define PSR_Z_BIT	0x40000000
-#define PSR_S_BIT	0x80000000
-
-/*
- * Groups of PSR bits
- */
-#define PSR_f		0xff000000	/* Flags		*/
-#define PSR_c		0x000000ff	/* Control		*/
+#include <uapi/asm/ptrace.h>
 
 #ifndef __ASSEMBLY__
 
-/*
- * This struct defines the way the registers are stored on the
- * stack during a system call.  Note that sizeof(struct pt_regs)
- * has to be a multiple of 8.
- */
-struct pt_regs {
-	unsigned long uregs[34];
-};
-
-#define UCreg_asr		uregs[32]
-#define UCreg_pc		uregs[31]
-#define UCreg_lr		uregs[30]
-#define UCreg_sp		uregs[29]
-#define UCreg_ip		uregs[28]
-#define UCreg_fp		uregs[27]
-#define UCreg_26		uregs[26]
-#define UCreg_25		uregs[25]
-#define UCreg_24		uregs[24]
-#define UCreg_23		uregs[23]
-#define UCreg_22		uregs[22]
-#define UCreg_21		uregs[21]
-#define UCreg_20		uregs[20]
-#define UCreg_19		uregs[19]
-#define UCreg_18		uregs[18]
-#define UCreg_17		uregs[17]
-#define UCreg_16		uregs[16]
-#define UCreg_15		uregs[15]
-#define UCreg_14		uregs[14]
-#define UCreg_13		uregs[13]
-#define UCreg_12		uregs[12]
-#define UCreg_11		uregs[11]
-#define UCreg_10		uregs[10]
-#define UCreg_09		uregs[9]
-#define UCreg_08		uregs[8]
-#define UCreg_07		uregs[7]
-#define UCreg_06		uregs[6]
-#define UCreg_05		uregs[5]
-#define UCreg_04		uregs[4]
-#define UCreg_03		uregs[3]
-#define UCreg_02		uregs[2]
-#define UCreg_01		uregs[1]
-#define UCreg_00		uregs[0]
-#define UCreg_ORIG_00		uregs[33]
-
-#ifdef __KERNEL__
-
 #define user_mode(regs)	\
 	(processor_mode(regs) == USER_MODE)
 
@@ -125,9 +55,5 @@ static inline int valid_user_regs(struct pt_regs *regs)
 
 #define instruction_pointer(regs)	((regs)->UCreg_pc)
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASSEMBLY__ */
-
 #endif
-

+ 7 - 0
arch/unicore32/include/uapi/asm/Kbuild

@@ -1,3 +1,10 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+header-y += byteorder.h
+header-y += kvm_para.h
+header-y += ptrace.h
+header-y += sigcontext.h
+header-y += unistd.h
+
+generic-y += kvm_para.h

+ 0 - 0
arch/unicore32/include/asm/byteorder.h → arch/unicore32/include/uapi/asm/byteorder.h


+ 90 - 0
arch/unicore32/include/uapi/asm/ptrace.h

@@ -0,0 +1,90 @@
+/*
+ * linux/arch/unicore32/include/asm/ptrace.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _UAPI__UNICORE_PTRACE_H__
+#define _UAPI__UNICORE_PTRACE_H__
+
+#define PTRACE_GET_THREAD_AREA	22
+
+/*
+ * PSR bits
+ */
+#define USER_MODE	0x00000010
+#define REAL_MODE	0x00000011
+#define INTR_MODE	0x00000012
+#define PRIV_MODE	0x00000013
+#define ABRT_MODE	0x00000017
+#define EXTN_MODE	0x0000001b
+#define SUSR_MODE	0x0000001f
+#define MODE_MASK	0x0000001f
+#define PSR_R_BIT	0x00000040
+#define PSR_I_BIT	0x00000080
+#define PSR_V_BIT	0x10000000
+#define PSR_C_BIT	0x20000000
+#define PSR_Z_BIT	0x40000000
+#define PSR_S_BIT	0x80000000
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f		0xff000000	/* Flags		*/
+#define PSR_c		0x000000ff	/* Control		*/
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the
+ * stack during a system call.  Note that sizeof(struct pt_regs)
+ * has to be a multiple of 8.
+ */
+struct pt_regs {
+	unsigned long uregs[34];
+};
+
+#define UCreg_asr		uregs[32]
+#define UCreg_pc		uregs[31]
+#define UCreg_lr		uregs[30]
+#define UCreg_sp		uregs[29]
+#define UCreg_ip		uregs[28]
+#define UCreg_fp		uregs[27]
+#define UCreg_26		uregs[26]
+#define UCreg_25		uregs[25]
+#define UCreg_24		uregs[24]
+#define UCreg_23		uregs[23]
+#define UCreg_22		uregs[22]
+#define UCreg_21		uregs[21]
+#define UCreg_20		uregs[20]
+#define UCreg_19		uregs[19]
+#define UCreg_18		uregs[18]
+#define UCreg_17		uregs[17]
+#define UCreg_16		uregs[16]
+#define UCreg_15		uregs[15]
+#define UCreg_14		uregs[14]
+#define UCreg_13		uregs[13]
+#define UCreg_12		uregs[12]
+#define UCreg_11		uregs[11]
+#define UCreg_10		uregs[10]
+#define UCreg_09		uregs[9]
+#define UCreg_08		uregs[8]
+#define UCreg_07		uregs[7]
+#define UCreg_06		uregs[6]
+#define UCreg_05		uregs[5]
+#define UCreg_04		uregs[4]
+#define UCreg_03		uregs[3]
+#define UCreg_02		uregs[2]
+#define UCreg_01		uregs[1]
+#define UCreg_00		uregs[0]
+#define UCreg_ORIG_00		uregs[33]
+
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI__UNICORE_PTRACE_H__ */

+ 0 - 0
arch/unicore32/include/asm/sigcontext.h → arch/unicore32/include/uapi/asm/sigcontext.h


+ 1 - 0
arch/unicore32/include/asm/unistd.h → arch/unicore32/include/uapi/asm/unistd.h

@@ -12,3 +12,4 @@
 
 /* Use the standard ABI for syscalls. */
 #include <asm-generic/unistd.h>
+#define __ARCH_WANT_SYS_EXECVE

+ 7 - 13
arch/unicore32/kernel/entry.S

@@ -573,17 +573,16 @@ ENDPROC(ret_to_user)
  */
 ENTRY(ret_from_fork)
 	b.l	schedule_tail
-	get_thread_info tsk
-	ldw	r1, [tsk+], #TI_FLAGS		@ check for syscall tracing
-	mov	why, #1
-	cand.a	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
-	beq	ret_slow_syscall
-	mov	r1, sp
-	mov	r0, #1				@ trace exit [IP = 1]
-	b.l	syscall_trace
 	b	ret_slow_syscall
 ENDPROC(ret_from_fork)
 
+ENTRY(ret_from_kernel_thread)
+	b.l	schedule_tail
+	mov	r0, r5
+	adr	lr, ret_slow_syscall
+	mov	pc, r4
+ENDPROC(ret_from_kernel_thread)
+
 /*=============================================================================
  * SWI handler
  *-----------------------------------------------------------------------------
@@ -669,11 +668,6 @@ __cr_alignment:
 #endif
 	.ltorg
 
-ENTRY(sys_execve)
-		add	r3, sp, #S_OFF
-		b	__sys_execve
-ENDPROC(sys_execve)
-
 ENTRY(sys_clone)
 		add	ip, sp, #S_OFF
 		stw	ip, [sp+], #4

+ 14 - 44
arch/unicore32/kernel/process.c

@@ -258,6 +258,7 @@ void release_thread(struct task_struct *dead_task)
 }
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
 
 int
 copy_thread(unsigned long clone_flags, unsigned long stack_start,
@@ -266,17 +267,22 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	struct thread_info *thread = task_thread_info(p);
 	struct pt_regs *childregs = task_pt_regs(p);
 
-	*childregs = *regs;
-	childregs->UCreg_00 = 0;
-	childregs->UCreg_sp = stack_start;
-
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 	thread->cpu_context.sp = (unsigned long)childregs;
-	thread->cpu_context.pc = (unsigned long)ret_from_fork;
-
-	if (clone_flags & CLONE_SETTLS)
-		childregs->UCreg_16 = regs->UCreg_03;
+	if (unlikely(!regs)) {
+		thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
+		thread->cpu_context.r4 = stack_start;
+		thread->cpu_context.r5 = stk_sz;
+		memset(childregs, 0, sizeof(struct pt_regs));
+	} else {
+		thread->cpu_context.pc = (unsigned long)ret_from_fork;
+		*childregs = *regs;
+		childregs->UCreg_00 = 0;
+		childregs->UCreg_sp = stack_start;
 
+		if (clone_flags & CLONE_SETTLS)
+			childregs->UCreg_16 = regs->UCreg_03;
+	}
 	return 0;
 }
 
@@ -305,42 +311,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fp)
 }
 EXPORT_SYMBOL(dump_fpu);
 
-/*
- * Shuffle the argument into the correct register before calling the
- * thread function.  r1 is the thread argument, r2 is the pointer to
- * the thread function, and r3 points to the exit function.
- */
-asm(".pushsection .text\n"
-"	.align\n"
-"	.type	kernel_thread_helper, #function\n"
-"kernel_thread_helper:\n"
-"	mov.a	asr, r7\n"
-"	mov	r0, r4\n"
-"	mov	lr, r6\n"
-"	mov	pc, r5\n"
-"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
-"	.popsection");
-
-/*
- * Create a kernel thread.
- */
-pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
-	struct pt_regs regs;
-
-	memset(&regs, 0, sizeof(regs));
-
-	regs.UCreg_04 = (unsigned long)arg;
-	regs.UCreg_05 = (unsigned long)fn;
-	regs.UCreg_06 = (unsigned long)do_exit;
-	regs.UCreg_07 = PRIV_MODE;
-	regs.UCreg_pc = (unsigned long)kernel_thread_helper;
-	regs.UCreg_asr = regs.UCreg_07 | PSR_I_BIT;
-
-	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;

+ 6 - 0
arch/unicore32/kernel/setup.h

@@ -30,4 +30,10 @@ extern char __vectors_start[], __vectors_end[];
 extern void kernel_thread_helper(void);
 
 extern void __init early_signal_init(void);
+
+extern asmlinkage void __backtrace(void);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+extern void __show_regs(struct pt_regs *);
+
 #endif

+ 0 - 63
arch/unicore32/kernel/sys.c

@@ -42,69 +42,6 @@ asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
 			parent_tid, child_tid);
 }
 
-/* sys_execve() executes a new program.
- * This is called indirectly via a small wrapper
- */
-asmlinkage long __sys_execve(const char __user *filename,
-			  const char __user *const __user *argv,
-			  const char __user *const __user *envp,
-			  struct pt_regs *regs)
-{
-	int error;
-	struct filename *fn;
-
-	fn = getname(filename);
-	error = PTR_ERR(fn);
-	if (IS_ERR(fn))
-		goto out;
-	error = do_execve(fn->name, argv, envp, regs);
-	putname(fn);
-out:
-	return error;
-}
-
-int kernel_execve(const char *filename,
-		  const char *const argv[],
-		  const char *const envp[])
-{
-	struct pt_regs regs;
-	int ret;
-
-	memset(&regs, 0, sizeof(struct pt_regs));
-	ret = do_execve(filename,
-			(const char __user *const __user *)argv,
-			(const char __user *const __user *)envp, &regs);
-	if (ret < 0)
-		goto out;
-
-	/*
-	 * Save argc to the register structure for userspace.
-	 */
-	regs.UCreg_00 = ret;
-
-	/*
-	 * We were successful.  We won't be returning to our caller, but
-	 * instead to user space by manipulating the kernel stack.
-	 */
-	asm("add	r0, %0, %1\n\t"
-		"mov	r1, %2\n\t"
-		"mov	r2, %3\n\t"
-		"mov	r22, #0\n\t"	/* not a syscall */
-		"mov	r23, %0\n\t"	/* thread structure */
-		"b.l	memmove\n\t"	/* copy regs to top of stack */
-		"mov	sp, r0\n\t"	/* reposition stack pointer */
-		"b	ret_to_user"
-		:
-		: "r" (current_thread_info()),
-		  "Ir" (THREAD_START_SP - sizeof(regs)),
-		  "r" (&regs),
-		  "Ir" (sizeof(regs))
-		: "r0", "r1", "r2", "r3", "ip", "lr", "memory");
-
- out:
-	return ret;
-}
-
 /* Note: used by the compat code even in 64-bit Linux. */
 SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,

+ 27 - 10
arch/unicore32/mm/fault.c

@@ -168,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 }
 
 static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		struct task_struct *tsk)
+		unsigned int flags, struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
 	int fault;
@@ -194,14 +194,7 @@ good_area:
 	 * If for any reason at all we couldn't handle the fault, make
 	 * sure we exit gracefully rather than endlessly redo the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
-			    (!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR))
-		return fault;
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 	return fault;
 
 check_stack:
@@ -216,6 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				 ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
 
 	tsk = current;
 	mm = tsk->mm;
@@ -236,6 +231,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		if (!user_mode(regs)
 		    && !search_exception_tables(regs->UCreg_pc))
 			goto no_context;
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -251,7 +247,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
 	}
 
-	fault = __do_pf(mm, addr, fsr, tsk);
+	fault = __do_pf(mm, addr, fsr, flags, tsk);
+
+	/* If we need to retry but a fatal signal is pending, handle the
+	 * signal first. We do not need to release the mmap_sem because
+	 * it would already be released in __lock_page_or_retry in
+	 * mm/filemap.c. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
+	if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			* of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 
 	/*

+ 2 - 0
arch/x86/boot/compressed/eboot.c

@@ -12,6 +12,8 @@
 #include <asm/setup.h>
 #include <asm/desc.h>
 
+#undef memcpy			/* Use memcpy from misc.c */
+
 #include "eboot.h"
 
 static efi_system_table_t *sys_table;

+ 0 - 3
arch/x86/boot/header.S

@@ -476,6 +476,3 @@ die:
 setup_corrupt:
 	.byte	7
 	.string	"No setup signature found...\n"
-
-	.data
-dummy:	.long	0

+ 4 - 11
arch/x86/include/asm/ptrace.h

@@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
 }
 #endif
 
-/*
- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps.  The previous stack will be directly underneath the saved
- * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
- *
- * This is valid only for kernel mode traps.
- */
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
-{
 #ifdef CONFIG_X86_32
-	return (unsigned long)(&regs->sp);
+extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
 #else
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
 	return regs->sp;
-#endif
 }
+#endif
 
 #define GET_IP(regs) ((regs)->ip)
 #define GET_FP(regs) ((regs)->bp)

+ 14 - 0
arch/x86/kernel/cpu/amd.c

@@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}
 	}
 
+	/*
+	 * The way access filter has a performance penalty on some workloads.
+	 * Disable it on the affected CPUs.
+	 */
+	if ((c->x86 == 0x15) &&
+	    (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+		u64 val;
+
+		if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
+			val |= 0x1E;
+			wrmsrl_safe(0xc0011021, val);
+		}
+	}
+
 	cpu_detect_cache_sizes(c);
 
 	/* Multi core CPU? */

+ 1 - 1
arch/x86/kernel/cpu/mcheck/mce_amd.c

@@ -6,7 +6,7 @@
  *
  *  Written by Jacob Shin - AMD, Inc.
  *
- *  Support: borislav.petkov@amd.com
+ *  Maintained by: Borislav Petkov <bp@alien8.de>
  *
  *  April 2006
  *     - added support for AMD Family 0x10 processors

+ 18 - 13
arch/x86/kernel/cpu/mcheck/mce_intel.c

@@ -285,34 +285,39 @@ void cmci_clear(void)
 	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
+static long cmci_rediscover_work_func(void *arg)
+{
+	int banks;
+
+	/* Recheck banks in case CPUs don't all have the same */
+	if (cmci_supported(&banks))
+		cmci_discover(banks);
+
+	return 0;
+}
+
 /*
  * After a CPU went down, cycle through all the others and rediscover.
  * Must run in process context.
  */
 void cmci_rediscover(int dying)
 {
-	int banks;
-	int cpu;
-	cpumask_var_t old;
+	int cpu, banks;
 
 	if (!cmci_supported(&banks))
 		return;
-	if (!alloc_cpumask_var(&old, GFP_KERNEL))
-		return;
-	cpumask_copy(old, &current->cpus_allowed);
 
 	for_each_online_cpu(cpu) {
 		if (cpu == dying)
 			continue;
-		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
+
+		if (cpu == smp_processor_id()) {
+			cmci_rediscover_work_func(NULL);
 			continue;
-		/* Recheck banks in case CPUs don't all have the same */
-		if (cmci_supported(&banks))
-			cmci_discover(banks);
-	}
+		}
 
-	set_cpus_allowed_ptr(current, old);
-	free_cpumask_var(old);
+		work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
+	}
 }
 
 /*

+ 7 - 7
arch/x86/kernel/entry_64.S

@@ -995,8 +995,8 @@ END(interrupt)
 	 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
-	ASM_CLAC
 	XCPT_FRAME
+	ASM_CLAC
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
 	/* 0(%rsp): old_rsp-ARGOFFSET */
@@ -1135,8 +1135,8 @@ END(common_interrupt)
  */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	pushq_cfi $~(\num)
 .Lcommon_\sym:
 	interrupt \do_sym
@@ -1190,8 +1190,8 @@ apicinterrupt IRQ_WORK_VECTOR \
  */
 .macro zeroentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
@@ -1208,8 +1208,8 @@ END(\sym)
 
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
@@ -1227,8 +1227,8 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
-	ASM_CLAC
 	INTR_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
 	subq $ORIG_RAX-R15, %rsp
@@ -1247,8 +1247,8 @@ END(\sym)
 
 .macro errorentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	XCPT_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
@@ -1266,8 +1266,8 @@ END(\sym)
 	/* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
-	ASM_CLAC
 	XCPT_FRAME
+	ASM_CLAC
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15

+ 6 - 2
arch/x86/kernel/microcode_amd.c

@@ -8,8 +8,8 @@
  *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
  *
  *  Maintainers:
- *  Andreas Herrmann <andreas.herrmann3@amd.com>
- *  Borislav Petkov <borislav.petkov@amd.com>
+ *  Andreas Herrmann <herrmann.der.user@googlemail.com>
+ *  Borislav Petkov <bp@alien8.de>
  *
  *  This driver allows upgrading the microcode on F10h AMD
  *  CPUs and later.
@@ -190,6 +190,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 #define F1XH_MPB_MAX_SIZE 2048
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
+#define F16H_MPB_MAX_SIZE 3458
 
 	switch (c->x86) {
 	case 0x14:
@@ -198,6 +199,9 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 	case 0x15:
 		max_size = F15H_MPB_MAX_SIZE;
 		break;
+	case 0x16:
+		max_size = F16H_MPB_MAX_SIZE;
+		break;
 	default:
 		max_size = F1XH_MPB_MAX_SIZE;
 		break;

+ 30 - 0
arch/x86/kernel/ptrace.c

@@ -22,6 +22,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -166,6 +167,35 @@ static inline bool invalid_selector(u16 value)
 
 #define FLAG_MASK		FLAG_MASK_32
 
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps.  The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+ *
+ * Now, if the stack is empty, '&regs->sp' is out of range. In this
+ * case we try to take the previous stack. To always return a non-null
+ * stack pointer we fall back to regs as stack if no previous stack
+ * exists.
+ *
+ * This is valid only for kernel mode traps.
+ */
+unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+	unsigned long sp = (unsigned long)&regs->sp;
+	struct thread_info *tinfo;
+
+	if (context == (sp & ~(THREAD_SIZE - 1)))
+		return sp;
+
+	tinfo = (struct thread_info *)context;
+	if (tinfo->previous_esp)
+		return tinfo->previous_esp;
+
+	return (unsigned long)regs;
+}
+EXPORT_SYMBOL_GPL(kernel_stack_pointer);
+
 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
 {
 	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);

+ 3 - 0
arch/x86/kvm/cpuid.h

@@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
+	if (!static_cpu_has(X86_FEATURE_XSAVE))
+		return 0;
+
 	best = kvm_find_cpuid_entry(vcpu, 1, 0);
 	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
 }

+ 7 - 4
arch/x86/kvm/vmx.c

@@ -6549,19 +6549,22 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
 	/* Exposing INVPCID only when PCID is exposed */
 	best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
 	if (vmx_invpcid_supported() &&
 	    best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
 	    guest_cpuid_has_pcid(vcpu)) {
+		exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
 		exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
 		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
 			     exec_control);
 	} else {
-		exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
-		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
-			     exec_control);
+		if (cpu_has_secondary_exec_ctrls()) {
+			exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+			exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
+			vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+				     exec_control);
+		}
 		if (best)
 			best->ebx &= ~bit(X86_FEATURE_INVPCID);
 	}

+ 3 - 0
arch/x86/kvm/x86.c

@@ -5781,6 +5781,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	int pending_vec, max_bits, idx;
 	struct desc_ptr dt;
 
+	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+		return -EINVAL;
+
 	dt.size = sregs->idt.limit;
 	dt.address = sregs->idt.base;
 	kvm_x86_ops->set_idt(vcpu, &dt);

+ 1 - 1
arch/x86/mm/tlb.c

@@ -197,7 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	}
 
 	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
-					|| vmflag == VM_HUGETLB) {
+					|| vmflag & VM_HUGETLB) {
 		local_flush_tlb();
 		goto flush_all;
 	}
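
Note: vm flags are a bitmask, so the old equality test only matched when VM_HUGETLB was the sole flag set, which a real VMA's flags never are since other VM_* bits are normally set as well. A tiny stand-alone illustration (the flag values are invented for the example, not the kernel's):

	#include <stdio.h>

	#define VM_READ    0x1UL
	#define VM_HUGETLB 0x4UL	/* invented values, for illustration only */

	int main(void)
	{
		unsigned long vmflag = VM_READ | VM_HUGETLB;

		printf("==: %d\n", vmflag == VM_HUGETLB);	/* 0: misses the flag */
		printf("& : %d\n", !!(vmflag & VM_HUGETLB));	/* 1: correct test */
		return 0;
	}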

Some files were not shown because too many files changed in this diff