Merge branch 'merge' into next

We want the irq fixes from the "merge" branch.
Benjamin Herrenschmidt, 13 years ago
commit 8b6ee04067
100 changed files with 886 additions and 531 deletions
  1. Documentation/devicetree/bindings/ata/ahci-platform.txt (+2 -3)
  2. Documentation/devicetree/bindings/sound/sgtl5000.txt (+2 -0)
  3. Documentation/networking/ip-sysctl.txt (+2 -2)
  4. MAINTAINERS (+3 -5)
  5. Makefile (+1 -1)
  6. arch/alpha/Kconfig (+1 -1)
  7. arch/alpha/include/asm/rtc.h (+2 -6)
  8. arch/alpha/kernel/core_tsunami.c (+1 -0)
  9. arch/alpha/kernel/sys_marvel.c (+1 -1)
  10. arch/arm/Kconfig (+9 -0)
  11. arch/arm/boot/dts/versatile-ab.dts (+1 -1)
  12. arch/arm/boot/dts/versatile-pb.dts (+1 -1)
  13. arch/arm/include/asm/thread_info.h (+7 -0)
  14. arch/arm/include/asm/tls.h (+4 -0)
  15. arch/arm/kernel/irq.c (+3 -3)
  16. arch/arm/kernel/ptrace.c (+9 -15)
  17. arch/arm/kernel/signal.c (+4 -51)
  18. arch/arm/kernel/smp.c (+19 -13)
  19. arch/arm/kernel/sys_arm.c (+1 -1)
  20. arch/arm/mach-kirkwood/board-dt.c (+1 -0)
  21. arch/arm/mach-omap1/ams-delta-fiq.c (+1 -1)
  22. arch/arm/mach-omap2/board-igep0020.c (+1 -1)
  23. arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h (+4 -4)
  24. arch/arm/mach-orion5x/mpp.h (+2 -2)
  25. arch/arm/mm/abort-ev6.S (+11 -6)
  26. arch/arm/mm/cache-l2x0.c (+14 -11)
  27. arch/arm/mm/init.c (+2 -2)
  28. arch/arm/mm/mmu.c (+2 -2)
  29. arch/arm/plat-omap/dma.c (+14 -0)
  30. arch/arm/vfp/vfpmodule.c (+99 -0)
  31. arch/ia64/kvm/kvm-ia64.c (+1 -1)
  32. arch/m68k/platform/520x/config.c (+3 -3)
  33. arch/m68k/platform/523x/config.c (+3 -3)
  34. arch/m68k/platform/5249/config.c (+3 -3)
  35. arch/m68k/platform/527x/config.c (+3 -3)
  36. arch/m68k/platform/528x/config.c (+3 -3)
  37. arch/m68k/platform/532x/config.c (+3 -3)
  38. arch/m68k/platform/coldfire/device.c (+3 -3)
  39. arch/mips/ath79/dev-wmac.c (+1 -1)
  40. arch/mips/include/asm/mach-jz4740/irq.h (+1 -1)
  41. arch/mips/include/asm/mmu_context.h (+0 -6)
  42. arch/mips/kernel/signal.c (+5 -22)
  43. arch/mips/kernel/signal32.c (+4 -16)
  44. arch/mips/kernel/signal_n32.c (+2 -8)
  45. arch/parisc/include/asm/hardware.h (+2 -1)
  46. arch/parisc/include/asm/page.h (+6 -0)
  47. arch/parisc/include/asm/pdc.h (+0 -7)
  48. arch/parisc/include/asm/pgtable.h (+2 -0)
  49. arch/parisc/include/asm/spinlock.h (+2 -0)
  50. arch/parisc/kernel/pdc_cons.c (+2 -1)
  51. arch/parisc/kernel/time.c (+1 -0)
  52. arch/powerpc/kernel/entry_64.S (+31 -13)
  53. arch/powerpc/kernel/irq.c (+13 -0)
  54. arch/powerpc/kvm/book3s_64_mmu_hv.c (+13 -9)
  55. arch/powerpc/kvm/book3s_hv.c (+0 -2)
  56. arch/powerpc/net/bpf_jit.h (+7 -1)
  57. arch/powerpc/net/bpf_jit_64.S (+95 -13)
  58. arch/powerpc/net/bpf_jit_comp.c (+9 -17)
  59. arch/sparc/kernel/central.c (+1 -1)
  60. arch/sparc/mm/ultra.S (+3 -3)
  61. arch/x86/Kconfig (+1 -1)
  62. arch/x86/boot/compressed/relocs.c (+0 -2)
  63. arch/x86/ia32/ia32_aout.c (+1 -2)
  64. arch/x86/include/asm/word-at-a-time.h (+33 -0)
  65. arch/x86/kernel/cpu/amd.c (+18 -0)
  66. arch/x86/kernel/kvm.c (+1 -8)
  67. arch/x86/kernel/process_64.c (+1 -0)
  68. arch/x86/kernel/setup_percpu.c (+13 -1)
  69. arch/x86/kvm/x86.c (+1 -0)
  70. arch/x86/platform/geode/net5501.c (+1 -1)
  71. arch/x86/xen/enlighten.c (+39 -3)
  72. arch/x86/xen/mmu.c (+6 -1)
  73. drivers/acpi/power.c (+1 -1)
  74. drivers/acpi/scan.c (+7 -10)
  75. drivers/ata/ahci.c (+2 -0)
  76. drivers/ata/ahci_platform.c (+1 -0)
  77. drivers/ata/libata-core.c (+1 -1)
  78. drivers/ata/libata-eh.c (+2 -1)
  79. drivers/ata/libata-scsi.c (+22 -16)
  80. drivers/ata/pata_arasan_cf.c (+1 -3)
  81. drivers/base/regmap/regmap.c (+3 -1)
  82. drivers/bluetooth/ath3k.c (+4 -0)
  83. drivers/bluetooth/btusb.c (+6 -0)
  84. drivers/firmware/efivars.c (+196 -0)
  85. drivers/gpu/drm/i915/i915_debugfs.c (+3 -0)
  86. drivers/gpu/drm/i915/i915_dma.c (+10 -5)
  87. drivers/gpu/drm/i915/intel_display.c (+5 -4)
  88. drivers/gpu/drm/i915/intel_hdmi.c (+1 -1)
  89. drivers/gpu/drm/i915/intel_lvds.c (+2 -2)
  90. drivers/gpu/drm/i915/intel_ringbuffer.c (+6 -3)
  91. drivers/gpu/drm/i915/intel_sdvo.c (+6 -0)
  92. drivers/gpu/drm/nouveau/nouveau_acpi.c (+1 -1)
  93. drivers/gpu/drm/nouveau/nouveau_bios.c (+7 -3)
  94. drivers/gpu/drm/nouveau/nouveau_hdmi.c (+3 -1)
  95. drivers/gpu/drm/nouveau/nouveau_i2c.c (+21 -178)
  96. drivers/gpu/drm/nouveau/nouveau_i2c.h (+1 -0)
  97. drivers/gpu/drm/nouveau/nv10_gpio.c (+1 -1)
  98. drivers/gpu/drm/nouveau/nvc0_fb.c (+5 -0)
  99. drivers/gpu/drm/radeon/radeon_device.c (+2 -2)
  100. drivers/hwmon/coretemp.c (+5 -1)

+ 2 - 3
Documentation/devicetree/bindings/ata/calxeda-sata.txt → Documentation/devicetree/bindings/ata/ahci-platform.txt

@@ -1,10 +1,10 @@
-* Calxeda SATA Controller
+* AHCI SATA Controller
 
 SATA nodes are defined to describe on-chip Serial ATA controllers.
 Each SATA controller should have its own node.
 
 Required properties:
-- compatible        : compatible list, contains "calxeda,hb-ahci"
+- compatible        : compatible list, contains "calxeda,hb-ahci" or "snps,spear-ahci"
 - interrupts        : <interrupt mapping for SATA IRQ>
 - reg               : <registers mapping>
 
@@ -14,4 +14,3 @@ Example:
                 reg = <0xffe08000 0x1000>;
                 interrupts = <115>;
         };
-

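The "snps,spear-ahci" string added above is matched by the generic AHCI platform driver alongside the Calxeda one (presumably the one-line drivers/ata/ahci_platform.c change in the diffstat). The sketch below shows the usual OF match-table pattern for covering both compatibles; the table name and surrounding context are illustrative, not necessarily the exact change in this series.

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Sketch only: how a platform AHCI driver typically matches both strings. */
static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "calxeda,hb-ahci" },
	{ .compatible = "snps,spear-ahci" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
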
+ 2 - 0
Documentation/devicetree/bindings/sound/sgtl5000.txt

@@ -3,6 +3,8 @@
 Required properties:
 - compatible : "fsl,sgtl5000".
 
+- reg : the I2C address of the device
+
 Example:
 
 codec: sgtl5000@0a {

+ 2 - 2
Documentation/networking/ip-sysctl.txt

@@ -147,7 +147,7 @@ tcp_adv_win_scale - INTEGER
 	(if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
 	if it is <= 0.
 	Possible values are [-31, 31], inclusive.
-	Default: 2
+	Default: 1
 
 tcp_allowed_congestion_control - STRING
 	Show/set the congestion control choices available to non-privileged
@@ -410,7 +410,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
 	net.core.rmem_max.  Calling setsockopt() with SO_RCVBUF disables
 	automatic tuning of that socket's receive buffer size, in which
 	case this value is ignored.
-	Default: between 87380B and 4MB, depending on RAM size.
+	Default: between 87380B and 6MB, depending on RAM size.
 
 tcp_sack - BOOLEAN
 	Enable select acknowledgments (SACKS).

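To make the tcp_adv_win_scale arithmetic above concrete: the documented formula counts buffering overhead as bytes/2^tcp_adv_win_scale when the value is positive, so the new default of 1 reserves half of the receive buffer as overhead where the old default of 2 reserved a quarter. A small, self-contained C sketch of that formula (illustrative only, not the kernel's internal helper):

#include <stdio.h>

/* Buffering overhead as documented for tcp_adv_win_scale (sketch only). */
static long overhead(long bytes, int scale)
{
	return scale > 0 ? bytes >> scale
			 : bytes - (bytes >> -scale);
}

int main(void)
{
	long buf = 87380;	/* default tcp_rmem[1] */

	printf("scale=2: %ld\n", overhead(buf, 2));	/* 21845, a quarter */
	printf("scale=1: %ld\n", overhead(buf, 1));	/* 43690, half      */
	return 0;
}
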
+ 3 - 5
MAINTAINERS

@@ -1968,10 +1968,7 @@ S:	Maintained
 F:	drivers/net/ethernet/ti/cpmac.c
 
 CPU FREQUENCY DRIVERS
-M:	Dave Jones <davej@redhat.com>
 L:	cpufreq@vger.kernel.org
-W:	http://www.codemonkey.org.uk/projects/cpufreq/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
 S:	Maintained
 F:	drivers/cpufreq/
 F:	include/linux/cpufreq.h
@@ -4037,6 +4034,7 @@ F:	Documentation/scsi/53c700.txt
 F:	drivers/scsi/53c700*
 
 LED SUBSYSTEM
+M:	Bryan Wu <bryan.wu@canonical.com>
 M:	Richard Purdie <rpurdie@rpsys.net>
 S:	Maintained
 F:	drivers/leds/
@@ -5892,11 +5890,11 @@ F:	Documentation/scsi/st.txt
 F:	drivers/scsi/st*
 
 SCTP PROTOCOL
-M:	Vlad Yasevich <vladislav.yasevich@hp.com>
+M:	Vlad Yasevich <vyasevich@gmail.com>
 M:	Sridhar Samudrala <sri@us.ibm.com>
 L:	linux-sctp@vger.kernel.org
 W:	http://lksctp.sourceforge.net
-S:	Supported
+S:	Maintained
 F:	Documentation/networking/sctp.txt
 F:	include/linux/sctp.h
 F:	include/net/sctp/

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*

+ 1 - 1
arch/alpha/Kconfig

@@ -477,7 +477,7 @@ config ALPHA_BROKEN_IRQ_MASK
 
 config VGA_HOSE
 	bool
-	depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI
+	depends on VGA_CONSOLE && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI)
 	default y
 	help
 	  Support VGA on an arbitrary hose; needed for several platforms

+ 2 - 6
arch/alpha/include/asm/rtc.h

@@ -1,14 +1,10 @@
 #ifndef _ALPHA_RTC_H
 #define _ALPHA_RTC_H
 
-#if defined(CONFIG_ALPHA_GENERIC)
+#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \
+ || defined(CONFIG_ALPHA_GENERIC)
 # define get_rtc_time		alpha_mv.rtc_get_time
 # define set_rtc_time		alpha_mv.rtc_set_time
-#else
-# if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP)
-#  define get_rtc_time		marvel_get_rtc_time
-#  define set_rtc_time		marvel_set_rtc_time
-# endif
 #endif
 
 #include <asm-generic/rtc.h>

+ 1 - 0
arch/alpha/kernel/core_tsunami.c

@@ -11,6 +11,7 @@
 #include <asm/core_tsunami.h>
 #undef __EXTERN_INLINE
 
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/sched.h>

+ 1 - 1
arch/alpha/kernel/sys_marvel.c

@@ -317,7 +317,7 @@ marvel_init_irq(void)
 }
 
 static int 
-marvel_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
 	struct pci_controller *hose = dev->sysdata;
 	struct io7_port *io7_port = hose->sysdata;

+ 9 - 0
arch/arm/Kconfig

@@ -1186,6 +1186,15 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
 
+config ARM_ERRATA_326103
+	bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
+	depends on CPU_V6
+	help
+	  Executing a SWP instruction to read-only memory does not set bit 11
+	  of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
+	  treat the access as a read, preventing a COW from occurring and
+	  causing the faulting task to livelock.
+
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
 	depends on CPU_V6 || CPU_V6K

+ 1 - 1
arch/arm/boot/dts/versatile-ab.dts

@@ -173,7 +173,7 @@
 			mmc@5000 {
 				compatible = "arm,primecell";
 				reg = < 0x5000 0x1000>;
-				interrupts = <22>;
+				interrupts = <22 34>;
 			};
 			kmi@6000 {
 				compatible = "arm,pl050", "arm,primecell";

+ 1 - 1
arch/arm/boot/dts/versatile-pb.dts

@@ -41,7 +41,7 @@
 			mmc@b000 {
 				compatible = "arm,primecell";
 				reg = <0xb000 0x1000>;
-				interrupts = <23>;
+				interrupts = <23 34>;
 			};
 		};
 	};

+ 7 - 0
arch/arm/include/asm/thread_info.h

@@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *);
 extern void vfp_sync_hwstate(struct thread_info *);
 extern void vfp_flush_hwstate(struct thread_info *);
 
+struct user_vfp;
+struct user_vfp_exc;
+
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+					   struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+				    struct user_vfp_exc __user *);
 #endif
 
 /*

+ 4 - 0
arch/arm/include/asm/tls.h

@@ -7,6 +7,8 @@
 
 	.macro set_tls_v6k, tp, tmp1, tmp2
 	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
+	mov	\tmp1, #0
+	mcr	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	.endm
 
 	.macro set_tls_v6, tp, tmp1, tmp2
@@ -15,6 +17,8 @@
 	mov	\tmp2, #0xffff0fff
 	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
 	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
+	movne	\tmp1, #0
+	mcrne	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
 	.endm
 

+ 3 - 3
arch/arm/kernel/irq.c

@@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	}
 
 	c = irq_data_get_irq_chip(d);
-	if (c->irq_set_affinity)
-		c->irq_set_affinity(d, affinity, true);
-	else
+	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
 
 	return ret;
 }

+ 9 - 15
arch/arm/kernel/ptrace.c

@@ -906,27 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ret;
 }
 
-#ifdef __ARMEB__
-#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
-#else
-#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
-#endif
-
 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 {
 	unsigned long ip;
 
-	/*
-	 * Save IP.  IP is used to denote syscall entry/exit:
-	 *  IP = 0 -> entry, = 1 -> exit
-	 */
-	ip = regs->ARM_ip;
-	regs->ARM_ip = why;
-
-	if (!ip)
+	if (why)
 		audit_syscall_exit(regs);
 	else
-		audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0,
+		audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
 				    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
@@ -936,6 +923,13 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 
 	current_thread_info()->syscall = scno;
 
+	/*
+	 * IP is used to denote syscall entry/exit:
+	 * IP = 0 -> entry, =1 -> exit
+	 */
+	ip = regs->ARM_ip;
+	regs->ARM_ip = why;
+
 	/* the 0x80 provides a way for the tracing parent to distinguish
 	   between a syscall stop and SIGTRAP delivery */
 	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)

+ 4 - 51
arch/arm/kernel/signal.c

@@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	const unsigned long magic = VFP_MAGIC;
 	const unsigned long size = VFP_STORAGE_SIZE;
 	int err = 0;
 
-	vfp_sync_hwstate(thread);
 	__put_user_error(magic, &frame->magic, err);
 	__put_user_error(size, &frame->size, err);
 
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
-			      sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Copy the exception registers.
-	 */
-	__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
-	__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+	if (err)
+		return -EFAULT;
 
-	return err ? -EFAULT : 0;
+	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 static int restore_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	unsigned long magic;
 	unsigned long size;
-	unsigned long fpexc;
 	int err = 0;
 
 	__get_user_error(magic, &frame->magic, err);
@@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
 		return -EINVAL;
 
-	vfp_flush_hwstate(thread);
-
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
-				sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Sanitise and restore the exception registers.
-	 */
-	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
-	/* Ensure the VFP is enabled. */
-	fpexc |= FPEXC_EN;
-	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
-	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
-	h->fpexc = fpexc;
-
-	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
-
-	return err ? -EFAULT : 0;
+	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 #endif

+ 19 - 13
arch/arm/kernel/smp.c

@@ -251,8 +251,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu = smp_processor_id();
 
-	printk("CPU%u: Booted secondary processor\n", cpu);
-
 	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
@@ -264,6 +262,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	enter_lazy_tlb(mm, current);
 	local_flush_tlb_all();
 
+	printk("CPU%u: Booted secondary processor\n", cpu);
+
 	cpu_init();
 	preempt_disable();
 	trace_hardirqs_off();
@@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu)
 	local_fiq_disable();
 	local_irq_disable();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	platform_cpu_kill(cpu);
-#endif
-
 	while (1)
 		cpu_relax();
 }
@@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+	unsigned int cpu;
+	for_each_cpu(cpu, mask)
+		platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
+
 void smp_send_stop(void)
 {
 	unsigned long timeout;
+	struct cpumask mask;
 
-	if (num_online_cpus() > 1) {
-		struct cpumask mask;
-		cpumask_copy(&mask, cpu_online_mask);
-		cpumask_clear_cpu(smp_processor_id(), &mask);
-
-		smp_cross_call(&mask, IPI_CPU_STOP);
-	}
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
@@ -595,6 +599,8 @@ void smp_send_stop(void)
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
+
+	smp_kill_cpus(&mask);
 }
 
 /*

+ 1 - 1
arch/arm/kernel/sys_arm.c

@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
 		  "Ir" (THREAD_START_SP - sizeof(regs)),
 		  "r" (&regs),
 		  "Ir" (sizeof(regs))
-		: "r0", "r1", "r2", "r3", "ip", "lr", "memory");
+		: "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
 
  out:
 	return ret;

+ 1 - 0
arch/arm/mach-kirkwood/board-dt.c

@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/kexec.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/bridge-regs.h>

+ 1 - 1
arch/arm/mach-omap1/ams-delta-fiq.c

@@ -48,7 +48,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
 	struct irq_chip *irq_chip = NULL;
 	int gpio, irq_num, fiq_count;
 
-	irq_desc = irq_to_desc(IH_GPIO_BASE);
+	irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
 	if (irq_desc)
 		irq_chip = irq_desc->irq_data.chip;
 

+ 1 - 1
arch/arm/mach-omap2/board-igep0020.c

@@ -641,7 +641,7 @@ static struct regulator_consumer_supply dummy_supplies[] = {
 
 static void __init igep_init(void)
 {
-	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+	regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 
 	/* Get IGEP2 hardware revision */

+ 4 - 4
arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h

@@ -941,10 +941,10 @@
 #define OMAP4_DSI2_LANEENABLE_MASK				(0x7 << 29)
 #define OMAP4_DSI1_LANEENABLE_SHIFT				24
 #define OMAP4_DSI1_LANEENABLE_MASK				(0x1f << 24)
-#define OMAP4_DSI2_PIPD_SHIFT					19
-#define OMAP4_DSI2_PIPD_MASK					(0x1f << 19)
-#define OMAP4_DSI1_PIPD_SHIFT					14
-#define OMAP4_DSI1_PIPD_MASK					(0x1f << 14)
+#define OMAP4_DSI1_PIPD_SHIFT					19
+#define OMAP4_DSI1_PIPD_MASK					(0x1f << 19)
+#define OMAP4_DSI2_PIPD_SHIFT					14
+#define OMAP4_DSI2_PIPD_MASK					(0x1f << 14)
 
 /* CONTROL_MCBSPLP */
 #define OMAP4_ALBCTRLRX_FSX_SHIFT				31

+ 2 - 2
arch/arm/mach-orion5x/mpp.h

@@ -65,8 +65,8 @@
 #define MPP8_GIGE               MPP(8,  0x1, 0, 0, 1,   1,   1)
 
 #define MPP9_UNUSED		MPP(9,  0x0, 0, 0, 1,   1,   1)
-#define MPP9_GPIO		MPP(9,  0x0, 0, 0, 1,   1,   1)
-#define MPP9_GIGE               MPP(9,  0x1, 1, 1, 1,   1,   1)
+#define MPP9_GPIO		MPP(9,  0x0, 1, 1, 1,   1,   1)
+#define MPP9_GIGE               MPP(9,  0x1, 0, 0, 1,   1,   1)
 
 #define MPP10_UNUSED		MPP(10, 0x0, 0, 0, 1,   1,   1)
 #define MPP10_GPIO		MPP(10, 0x0, 1, 1, 1,   1,   1)

+ 11 - 6
arch/arm/mm/abort-ev6.S

@@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 /*
- * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103).
- * The test below covers all the write situations, including Java bytecodes
+ * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
  */
-	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
+#ifdef CONFIG_ARM_ERRATA_326103
+	ldr	ip, =0x4107b36
+	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
+	teq	ip, r3, lsr #4			@ r0 ARM1136?
+	bne	do_DataAbort
 	tst	r5, #PSR_J_BIT			@ Java?
+	tsteq	r5, #PSR_T_BIT			@ Thumb?
 	bne	do_DataAbort
-	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
-	ldreq	r3, [r4]			@ read aborted ARM instruction
+	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
+	ldr	r3, [r4]			@ read aborted ARM instruction
 #ifdef CONFIG_CPU_ENDIAN_BE8
-	reveq	r3, r3
+	rev	r3, r3
 #endif
 	do_ldrd_abort tmp=ip, insn=r3
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
+#endif
 	b	do_DataAbort

+ 14 - 11
arch/arm/mm/cache-l2x0.c

@@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static u32 l2x0_way_mask;	/* Bitmask of active ways */
 static u32 l2x0_size;
+static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 
 struct l2x0_regs l2x0_saved_regs;
 
@@ -61,12 +62,7 @@ static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
 
-#ifdef CONFIG_PL310_ERRATA_753970
-	/* write to an unmmapped register */
-	writel_relaxed(0, base + L2X0_DUMMY_REG);
-#else
-	writel_relaxed(0, base + L2X0_CACHE_SYNC);
-#endif
+	writel_relaxed(0, base + sync_reg_offset);
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
 }
 
 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
+static inline void debug_writel(unsigned long val)
+{
+	if (outer_cache.set_debug)
+		outer_cache.set_debug(val);
+}
 
-#define debug_writel(val)	outer_cache.set_debug(val)
-
-static void l2x0_set_debug(unsigned long val)
+static void pl310_set_debug(unsigned long val)
 {
 	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
 }
@@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
 {
 }
 
-#define l2x0_set_debug	NULL
+#define pl310_set_debug	NULL
 #endif
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		else
 			ways = 8;
 		type = "L310";
+#ifdef CONFIG_PL310_ERRATA_753970
+		/* Unmapped register. */
+		sync_reg_offset = L2X0_DUMMY_REG;
+#endif
+		outer_cache.set_debug = pl310_set_debug;
 		break;
 	case L2X0_CACHE_ID_PART_L210:
 		ways = (aux >> 13) & 0xf;
@@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	outer_cache.flush_all = l2x0_flush_all;
 	outer_cache.inv_all = l2x0_inv_all;
 	outer_cache.disable = l2x0_disable;
-	outer_cache.set_debug = l2x0_set_debug;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",

+ 2 - 2
arch/arm/mm/init.c

@@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid);
 #endif
 
 #ifndef CONFIG_SPARSEMEM
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 	struct memblock_region *reg;
 

+ 2 - 2
arch/arm/mm/mmu.c

@@ -618,8 +618,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 	}
 }
 
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
-	unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+	unsigned long end, unsigned long phys, const struct mem_type *type)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;

+ 14 - 0
arch/arm/plat-omap/dma.c

@@ -916,6 +916,13 @@ void omap_start_dma(int lch)
 			l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
 	l |= OMAP_DMA_CCR_EN;
 
+	/*
+	 * As dma_write() uses IO accessors which are weakly ordered, there
+	 * is no guarantee that data in coherent DMA memory will be visible
+	 * to the DMA device.  Add a memory barrier here to ensure that any
+	 * such data is visible prior to enabling DMA.
+	 */
+	mb();
 	p->dma_write(l, CCR, lch);
 
 	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
@@ -965,6 +972,13 @@ void omap_stop_dma(int lch)
 		p->dma_write(l, CCR, lch);
 	}
 
+	/*
+	 * Ensure that data transferred by DMA is visible to any access
+	 * after DMA has been disabled.  This is important for coherent
+	 * DMA regions.
+	 */
+	mb();
+
 	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 		int next_lch, cur_lch = lch;
 		char dma_chan_link_map[dma_lch_count];

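The two barriers added above address a generic hazard: plain CPU stores to coherent DMA memory are not ordered against the relaxed MMIO write that starts (or stops) a transfer. Below is a driver-agnostic sketch of the "fill buffer, then kick the hardware" side, using a hypothetical descriptor and register layout; it is illustrative only and not the OMAP DMA API.

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct demo_desc {			/* hypothetical descriptor in coherent memory */
	__le32 addr;
	__le32 flags;
};

#define DEMO_OWNED_BY_HW	cpu_to_le32(1u << 31)
#define DEMO_ENABLE		0x1

static void demo_start_dma(struct demo_desc *desc, u32 buf_phys,
			   void __iomem *ctrl)
{
	desc->addr  = cpu_to_le32(buf_phys);	/* plain, weakly ordered stores    */
	desc->flags = DEMO_OWNED_BY_HW;
	mb();					/* make them visible to the device */
	writel_relaxed(DEMO_ENABLE, ctrl);	/* ...before the channel is kicked */
}
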
+ 99 - 0
arch/arm/vfp/vfpmodule.c

@@ -17,6 +17,8 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -528,6 +530,103 @@ void vfp_flush_hwstate(struct thread_info *thread)
 	put_cpu();
 }
 
+/*
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+				    struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	int err = 0;
+
+	/* Ensure that the saved hwstate is up-to-date. */
+	vfp_sync_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+			      sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Copy the exception registers.
+	 */
+	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	if (err)
+		return -EFAULT;
+
+	/* Ensure that VFP is disabled. */
+	vfp_flush_hwstate(thread);
+
+	/*
+	 * As per the PCS, clear the length and stride bits for function
+	 * entry.
+	 */
+	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
+
+	/*
+	 * Disable VFP in the hwstate so that we can detect if it gets
+	 * used.
+	 */
+	hwstate->fpexc &= ~FPEXC_EN;
+	return 0;
+}
+
+/* Sanitise and restore the current VFP state from the provided structures. */
+int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+			     struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	unsigned long fpexc;
+	int err = 0;
+
+	/*
+	 * If VFP has been used, then disable it to avoid corrupting
+	 * the new thread state.
+	 */
+	if (hwstate->fpexc & FPEXC_EN)
+		vfp_flush_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+				sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Sanitise and restore the exception registers.
+	 */
+	__get_user_error(fpexc, &ufp_exc->fpexc, err);
+
+	/* Ensure the VFP is enabled. */
+	fpexc |= FPEXC_EN;
+
+	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+	hwstate->fpexc = fpexc;
+
+	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	return err ? -EFAULT : 0;
+}
+
 /*
  * VFP hardware can lose all context when a CPU goes offline.
  * As we will be running in SMP mode with CPU hotplug, we will save the

+ 1 - 1
arch/ia64/kvm/kvm-ia64.c

@@ -1174,7 +1174,7 @@ out:
 
 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
 {
-	return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL);
+	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)

+ 3 - 3
arch/m68k/platform/520x/config.c

@@ -22,7 +22,7 @@
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m520x_qspi_init(void)
 {
@@ -35,7 +35,7 @@ static void __init m520x_qspi_init(void)
 	writew(par, MCF_GPIO_PAR_UART);
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -79,7 +79,7 @@ void __init config_BSP(char *commandp, int size)
 	mach_sched_init = hw_timer_init;
 	m520x_uarts_init();
 	m520x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m520x_qspi_init();
 #endif
 }

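This and the following m68k platform files all switch from #ifdef to IS_ENABLED(). The usual motivation is that the ColdFire QSPI driver may be built as a module: with =m only CONFIG_SPI_COLDFIRE_QSPI_MODULE is defined, so a plain #ifdef would skip registering the platform device. A minimal illustration of the difference (sketch only):

#include <linux/kconfig.h>

#ifdef CONFIG_SPI_COLDFIRE_QSPI
/* Reached only when the driver is built in (=y). */
#endif

#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
/* Reached for both =y and =m, so the QSPI platform device is
 * registered whenever the driver is enabled in any form. */
#endif
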
+ 3 - 3
arch/m68k/platform/523x/config.c

@@ -22,7 +22,7 @@
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m523x_qspi_init(void)
 {
@@ -36,7 +36,7 @@ static void __init m523x_qspi_init(void)
 	writew(par, MCFGPIO_PAR_TIMER);
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -58,7 +58,7 @@ void __init config_BSP(char *commandp, int size)
 {
 	mach_sched_init = hw_timer_init;
 	m523x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m523x_qspi_init();
 #endif
 }

+ 3 - 3
arch/m68k/platform/5249/config.c

@@ -51,7 +51,7 @@ static struct platform_device *m5249_devices[] __initdata = {
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m5249_qspi_init(void)
 {
@@ -61,7 +61,7 @@ static void __init m5249_qspi_init(void)
 	mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI);
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size)
 #ifdef CONFIG_M5249C3
 	m5249_smc91x_init();
 #endif
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m5249_qspi_init();
 #endif
 }

+ 3 - 3
arch/m68k/platform/527x/config.c

@@ -23,7 +23,7 @@
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m527x_qspi_init(void)
 {
@@ -42,7 +42,7 @@ static void __init m527x_qspi_init(void)
 #endif
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size)
 	mach_sched_init = hw_timer_init;
 	m527x_uarts_init();
 	m527x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m527x_qspi_init();
 #endif
 }

+ 3 - 3
arch/m68k/platform/528x/config.c

@@ -24,7 +24,7 @@
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m528x_qspi_init(void)
 {
@@ -32,7 +32,7 @@ static void __init m528x_qspi_init(void)
 	__raw_writeb(0x07, MCFGPIO_PQSPAR);
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -98,7 +98,7 @@ void __init config_BSP(char *commandp, int size)
 	mach_sched_init = hw_timer_init;
 	m528x_uarts_init();
 	m528x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m528x_qspi_init();
 #endif
 }

+ 3 - 3
arch/m68k/platform/532x/config.c

@@ -30,7 +30,7 @@
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m532x_qspi_init(void)
 {
@@ -38,7 +38,7 @@ static void __init m532x_qspi_init(void)
 	writew(0x01f0, MCF_GPIO_PAR_QSPI);
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -77,7 +77,7 @@ void __init config_BSP(char *commandp, int size)
 	mach_sched_init = hw_timer_init;
 	m532x_uarts_init();
 	m532x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m532x_qspi_init();
 #endif
 

+ 3 - 3
arch/m68k/platform/coldfire/device.c

@@ -121,7 +121,7 @@ static struct platform_device mcf_fec1 = {
 #endif /* MCFFEC_BASE1 */
 #endif /* CONFIG_FEC */
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 /*
  *	The ColdFire QSPI module is an SPI protocol hardware block used
  *	on a number of different ColdFire CPUs.
@@ -274,7 +274,7 @@ static struct platform_device mcf_qspi = {
 	.resource		= mcf_qspi_resources,
 	.dev.platform_data	= &mcf_qspi_data,
 };
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 static struct platform_device *mcf_devices[] __initdata = {
 	&mcf_uart,
@@ -284,7 +284,7 @@ static struct platform_device *mcf_devices[] __initdata = {
 	&mcf_fec1,
 #endif
 #endif
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	&mcf_qspi,
 #endif
 };

+ 1 - 1
arch/mips/ath79/dev-wmac.c

@@ -58,8 +58,8 @@ static void __init ar913x_wmac_setup(void)
 
 static int ar933x_wmac_reset(void)
 {
-	ath79_device_reset_clear(AR933X_RESET_WMAC);
 	ath79_device_reset_set(AR933X_RESET_WMAC);
+	ath79_device_reset_clear(AR933X_RESET_WMAC);
 
 	return 0;
 }

+ 1 - 1
arch/mips/include/asm/mach-jz4740/irq.h

@@ -45,7 +45,7 @@
 #define JZ4740_IRQ_LCD		JZ4740_IRQ(30)
 
 /* 2nd-level interrupts */
-#define JZ4740_IRQ_DMA(x)	(JZ4740_IRQ(32) + (X))
+#define JZ4740_IRQ_DMA(x)	(JZ4740_IRQ(32) + (x))
 
 #define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x))
 #define JZ4740_IRQ_GPIO(x)	(JZ4740_IRQ(48) + (x))

+ 0 - 6
arch/mips/include/asm/mmu_context.h

@@ -37,12 +37,6 @@ extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
 		write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
 	} while (0)
 
-
-static inline unsigned long get_current_pgd(void)
-{
-	return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL);
-}
-
 #else /* CONFIG_MIPS_PGD_C0_CONTEXT: using  pgd_current*/
 
 /*

+ 5 - 22
arch/mips/kernel/signal.c

@@ -257,11 +257,8 @@ asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -286,11 +283,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -362,10 +356,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
 		goto badframe;
 
 	sigdelsetmask(&blocked, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = blocked;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&blocked);
 
 	sig = restore_sigcontext(&regs, &frame->sf_sc);
 	if (sig < 0)
@@ -401,10 +392,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
 	if (sig < 0)
@@ -580,12 +568,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
 	if (ret)
 		return ret;
 
-	spin_lock_irq(&current->sighand->siglock);
-	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
-	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&current->blocked, sig);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	block_sigmask(ka, sig);
 
 	return ret;
 }

+ 4 - 16
arch/mips/kernel/signal32.c

@@ -290,11 +290,8 @@ asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -318,11 +315,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -488,10 +482,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
 		goto badframe;
 
 	sigdelsetmask(&blocked, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = blocked;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&blocked);
 
 	sig = restore_sigcontext32(&regs, &frame->sf_sc);
 	if (sig < 0)
@@ -529,10 +520,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
 	if (sig < 0)

+ 2 - 8
arch/mips/kernel/signal_n32.c

@@ -93,11 +93,8 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 	sigset_from_compat(&newset, &uset);
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -121,10 +118,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
 	if (sig < 0)

+ 2 - 1
arch/parisc/include/asm/hardware.h

@@ -2,7 +2,6 @@
 #define _PARISC_HARDWARE_H
 
 #include <linux/mod_devicetable.h>
-#include <asm/pdc.h>
 
 #define HWTYPE_ANY_ID		PA_HWTYPE_ANY_ID
 #define HVERSION_ANY_ID		PA_HVERSION_ANY_ID
@@ -95,12 +94,14 @@ struct bc_module {
 #define HPHW_MC	       15
 #define HPHW_FAULTY    31
 
+struct parisc_device_id;
 
 /* hardware.c: */
 extern const char *parisc_hardware_description(struct parisc_device_id *id);
 extern enum cpu_type parisc_get_cpu_type(unsigned long hversion);
 
 struct pci_dev;
+struct hardware_path;
 
 /* drivers.c: */
 extern struct parisc_device *alloc_pa_dev(unsigned long hpa,

+ 6 - 0
arch/parisc/include/asm/page.h

@@ -160,5 +160,11 @@ extern int npmem_ranges;
 
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
+#include <asm/pdc.h>
+
+#define PAGE0   ((struct zeropage *)__PAGE_OFFSET)
+
+/* DEFINITION OF THE ZERO-PAGE (PAG0) */
+/* based on work by Jason Eckhardt (jason@equator.com) */
 
 #endif /* _PARISC_PAGE_H */

+ 0 - 7
arch/parisc/include/asm/pdc.h

@@ -343,8 +343,6 @@
 
 #ifdef __KERNEL__
 
-#include <asm/page.h> /* for __PAGE_OFFSET */
-
 extern int pdc_type;
 
 /* Values for pdc_type */
@@ -677,11 +675,6 @@ static inline char * os_id_to_string(u16 os_id) {
 
 #endif /* __KERNEL__ */
 
-#define PAGE0   ((struct zeropage *)__PAGE_OFFSET)
-
-/* DEFINITION OF THE ZERO-PAGE (PAG0) */
-/* based on work by Jason Eckhardt (jason@equator.com) */
-
 /* flags of the device_path */
 #define	PF_AUTOBOOT	0x80
 #define	PF_AUTOSEARCH	0x40

+ 2 - 0
arch/parisc/include/asm/pgtable.h

@@ -44,6 +44,8 @@ struct vm_area_struct;
 
 #endif /* !__ASSEMBLY__ */
 
+#include <asm/page.h>
+
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \

+ 2 - 0
arch/parisc/include/asm/spinlock.h

@@ -1,6 +1,8 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
+#include <asm/barrier.h>
+#include <asm/ldcw.h>
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 

+ 2 - 1
arch/parisc/kernel/pdc_cons.c

@@ -50,6 +50,7 @@
 #include <linux/init.h>
 #include <linux/major.h>
 #include <linux/tty.h>
+#include <asm/page.h>		/* for PAGE0 */
 #include <asm/pdc.h>		/* for iodc_call() proto and friends */
 
 static DEFINE_SPINLOCK(pdc_console_lock);
@@ -104,7 +105,7 @@ static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
 
 static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
 {
-	if (!tty->count) {
+	if (tty->count == 1) {
 		del_timer_sync(&pdc_console_timer);
 		tty_port_tty_set(&tty_port, NULL);
 	}

+ 1 - 0
arch/parisc/kernel/time.c

@@ -29,6 +29,7 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/page.h>
 #include <asm/param.h>
 #include <asm/pdc.h>
 #include <asm/led.h>

+ 31 - 13
arch/powerpc/kernel/entry_64.S

@@ -584,23 +584,19 @@ _GLOBAL(ret_from_except_lite)
 fast_exc_return_irq:
 restore:
 	/*
-	 * This is the main kernel exit path, we first check if we
-	 * have to change our interrupt state.
+	 * This is the main kernel exit path. First we check if we
+	 * are about to re-enable interrupts
 	 */
 	ld	r5,SOFTE(r1)
 	lbz	r6,PACASOFTIRQEN(r13)
-	cmpwi	cr1,r5,0
-	cmpw	cr0,r5,r6
-	beq	cr0,4f
+	cmpwi	cr0,r5,0
+	beq	restore_irq_off
 
-	/* We do, handle disable first, which is easy */
-	bne	cr1,3f;
- 	li	r0,0
-	stb	r0,PACASOFTIRQEN(r13);
-	TRACE_DISABLE_INTS
-	b	4f
+	/* We are enabling, were we already enabled ? Yes, just return */
+	cmpwi	cr0,r6,1
+	beq	cr0,do_restore
 
-3:	/*
+	/*
 	 * We are about to soft-enable interrupts (we are hard disabled
 	 * at this point). We check if there's anything that needs to
 	 * be replayed first.
@@ -622,7 +618,7 @@ restore_no_replay:
 	/*
 	 * Final return path. BookE is handled in a different file
 	 */
-4:
+do_restore:
 #ifdef CONFIG_PPC_BOOK3E
 	b	.exception_return_book3e
 #else
@@ -695,6 +691,25 @@ fast_exception_return:
 
 #endif /* CONFIG_PPC_BOOK3E */
 
+	/*
+	 * We are returning to a context with interrupts soft disabled.
+	 *
+	 * However, we may also about to hard enable, so we need to
+	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+	 * or that bit can get out of sync and bad things will happen
+	 */
+restore_irq_off:
+	ld	r3,_MSR(r1)
+	lbz	r7,PACAIRQHAPPENED(r13)
+	andi.	r0,r3,MSR_EE
+	beq	1f
+	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
+	stb	r7,PACAIRQHAPPENED(r13)
+1:	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13);
+	TRACE_DISABLE_INTS
+	b	do_restore
+
 	/*
 	 * Something did happen, check if a re-emit is needed
 	 * (this also clears paca->irq_happened)
@@ -744,6 +759,9 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
  
+
+
+3:
 do_work:
 #ifdef CONFIG_PREEMPT
 	andi.	r0,r3,MSR_PR	/* Returning to user mode? */

+ 13 - 0
arch/powerpc/kernel/irq.c

@@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en)
 	 */
 	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
 		__hard_irq_disable();
+#ifdef CONFIG_TRACE_IRQFLAG
+	else {
+		/*
+		 * We should already be hard disabled here. We had bugs
+		 * where that wasn't the case so let's dbl check it and
+		 * warn if we are wrong. Only do that when IRQ tracing
+		 * is enabled as mfmsr() can be costly.
+		 */
+		if (WARN_ON(mfmsr() & MSR_EE))
+			__hard_irq_disable();
+	}
+#endif /* CONFIG_TRACE_IRQFLAG */
+
 	set_soft_enabled(0);
 
 	/*

+ 13 - 9
arch/powerpc/kvm/book3s_64_mmu_hv.c

@@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 			    !(memslot->userspace_addr & (s - 1))) {
 				start &= ~(s - 1);
 				pgsize = s;
+				get_page(hpage);
+				put_page(page);
 				page = hpage;
 			}
 		}
@@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 	err = 0;
 
  out:
-	if (got) {
-		if (PageHuge(page))
-			page = compound_head(page);
+	if (got)
 		put_page(page);
-	}
 	return err;
 
  up_err:
@@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		SetPageDirty(page);
 
  out_put:
-	if (page)
-		put_page(page);
+	if (page) {
+		/*
+		 * We drop pages[0] here, not page because page might
+		 * have been set to the head page of a compound, but
+		 * we have to drop the reference on the correct tail
+		 * page to match the get inside gup()
+		 */
+		put_page(pages[0]);
+	}
 	return ret;
 
  out_unlock:
@@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 			pa = *physp;
 		}
 		page = pfn_to_page(pa >> PAGE_SHIFT);
+		get_page(page);
 	} else {
 		hva = gfn_to_hva_memslot(memslot, gfn);
 		npages = get_user_pages_fast(hva, 1, 1, pages);
@@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 		page = compound_head(page);
 		psize <<= compound_order(page);
 	}
-	if (!kvm->arch.using_mmu_notifiers)
-		get_page(page);
 	offset = gpa & (psize - 1);
 	if (nb_ret)
 		*nb_ret = psize - offset;
@@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
 {
 	struct page *page = virt_to_page(va);
 
-	page = compound_head(page);
 	put_page(page);
 }
 

+ 0 - 2
arch/powerpc/kvm/book3s_hv.c

@@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id)
 				continue;
 			pfn = physp[j] >> PAGE_SHIFT;
 			page = pfn_to_page(pfn);
-			if (PageHuge(page))
-				page = compound_head(page);
 			SetPageDirty(page);
 			put_page(page);
 		}

+ 7 - 1
arch/powerpc/net/bpf_jit.h

@@ -48,7 +48,13 @@
 /*
  * Assembly helpers from arch/powerpc/net/bpf_jit.S:
  */
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+#define DECLARE_LOAD_FUNC(func)	\
+	extern u8 func[], func##_negative_offset[], func##_positive_offset[]
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+DECLARE_LOAD_FUNC(sk_load_byte_msh);
 
 #define FUNCTION_DESCR_SIZE	24
 

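DECLARE_LOAD_FUNC() above simply declares the three entry points that the reworked assembly below provides for each loader; for example, DECLARE_LOAD_FUNC(sk_load_word) expands to:

extern u8 sk_load_word[], sk_load_word_negative_offset[],
	  sk_load_word_positive_offset[];
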
+ 95 - 13
arch/powerpc/net/bpf_jit_64.S

@@ -31,14 +31,13 @@
  * then branch directly to slow_path_XXX if required.  (In fact, could
  * load a spare GPR with the address of slow_path_generic and pass size
  * as an argument, making the call site a mtlr, li and bllr.)
- *
- * Technically, the "is addr < 0" check is unnecessary & slowing down
- * the ABS path, as it's statically checked on generation.
  */
 	.globl	sk_load_word
 sk_load_word:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_word_neg
+	.globl	sk_load_word_positive_offset
+sk_load_word_positive_offset:
 	/* Are we accessing past headlen? */
 	subi	r_scratch1, r_HL, 4
 	cmpd	r_scratch1, r_addr
@@ -51,7 +50,9 @@ sk_load_word:
 	.globl	sk_load_half
 sk_load_half:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_half_neg
+	.globl	sk_load_half_positive_offset
+sk_load_half_positive_offset:
 	subi	r_scratch1, r_HL, 2
 	cmpd	r_scratch1, r_addr
 	blt	bpf_slow_path_half
@@ -61,7 +62,9 @@ sk_load_half:
 	.globl	sk_load_byte
 sk_load_byte:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_byte_neg
+	.globl	sk_load_byte_positive_offset
+sk_load_byte_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte
 	lbzx	r_A, r_D, r_addr
@@ -69,22 +72,20 @@ sk_load_byte:
 
 /*
  * BPF_S_LDX_B_MSH: ldxb  4*([offset]&0xf)
- * r_addr is the offset value, already known positive
+ * r_addr is the offset value
  */
 	.globl sk_load_byte_msh
 sk_load_byte_msh:
+	cmpdi	r_addr, 0
+	blt	bpf_slow_path_byte_msh_neg
+	.globl sk_load_byte_msh_positive_offset
+sk_load_byte_msh_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte_msh
 	lbzx	r_X, r_D, r_addr
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
 
-bpf_error:
-	/* Entered with cr0 = lt */
-	li	r3, 0
-	/* Generated code will 'blt epilogue', returning 0. */
-	blr
-
 /* Call out to skb_copy_bits:
  * We'll need to back up our volatile regs first; we have
  * local variable space at r1+(BPF_PPC_STACK_BASIC).
@@ -136,3 +137,84 @@ bpf_slow_path_byte_msh:
 	lbz	r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
+
+/* Call out to bpf_internal_load_pointer_neg_helper:
+ * We'll need to back up our volatile regs first; we have
+ * local variable space at r1+(BPF_PPC_STACK_BASIC).
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define sk_negative_common(SIZE)				\
+	mflr	r0;						\
+	std	r0, 16(r1);					\
+	/* R3 goes in parameter space of caller's frame */	\
+	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1);		\
+	/* R3 = r_skb, as passed */				\
+	mr	r4, r_addr;					\
+	li	r5, SIZE;					\
+	bl	bpf_internal_load_pointer_neg_helper;		\
+	/* R3 != 0 on success */				\
+	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
+	ld	r0, 16(r1);					\
+	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	mtlr	r0;						\
+	cmpldi	r3, 0;						\
+	beq	bpf_error_slow;	/* cr0 = EQ */			\
+	mr	r_addr, r3;					\
+	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	/* Great success! */
+
+bpf_slow_path_word_neg:
+	lis     r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_word_negative_offset
+sk_load_word_negative_offset:
+	sk_negative_common(4)
+	lwz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_half_neg:
+	lis     r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_half_negative_offset
+sk_load_half_negative_offset:
+	sk_negative_common(2)
+	lhz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_neg:
+	lis     r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_negative_offset
+sk_load_byte_negative_offset:
+	sk_negative_common(1)
+	lbz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_msh_neg:
+	lis     r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_msh_negative_offset
+sk_load_byte_msh_negative_offset:
+	sk_negative_common(1)
+	lbz	r_X, 0(r_addr)
+	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
+	blr
+
+bpf_error_slow:
+	/* fabricate a cr0 = lt */
+	li	r_scratch1, -1
+	cmpdi	r_scratch1, 0
+bpf_error:
+	/* Entered with cr0 = lt */
+	li	r3, 0
+	/* Generated code will 'blt epilogue', returning 0. */
+	blr

+ 9 - 17
arch/powerpc/net/bpf_jit_comp.c

@@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	PPC_BLR();
 }
 
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+
 /* Assemble the body code between the prologue & epilogue. */
 static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			      struct codegen_context *ctx,
@@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 
 			/*** Absolute loads from packet header/data ***/
 		case BPF_S_LD_W_ABS:
-			func = sk_load_word;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 			goto common_load;
 		case BPF_S_LD_H_ABS:
-			func = sk_load_half;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 			goto common_load;
 		case BPF_S_LD_B_ABS:
-			func = sk_load_byte;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 		common_load:
-			/*
-			 * Load from [K].  Reference with the (negative)
-			 * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
-			 */
+			/* Load from [K]. */
 			ctx->seen |= SEEN_DATAREF;
-			if ((int)K < 0)
-				return -ENOTSUPP;
 			PPC_LI64(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_LI32(r_addr, K);
@@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 		common_load_ind:
 			/*
 			 * Load from [X + K].  Negative offsets are tested for
-			 * in the helper functions, and result in a 'ret 0'.
+			 * in the helper functions.
 			 */
 			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
 			PPC_LI64(r_scratch1, func);
@@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			break;
 
 		case BPF_S_LDX_B_MSH:
-			/*
-			 * x86 version drops packet (RET 0) when K<0, whereas
-			 * interpreter does allow K<0 (__load_pointer, special
-			 * ancillary data).  common_load returns ENOTSUPP if K<0,
-			 * so we fall back to interpreter & filter works.
-			 */
-			func = sk_load_byte_msh;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 			goto common_load;
 			break;
 

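CHOOSE_LOAD_FUNC() above dispatches on the sign and range of the constant offset K. A plain-C restatement of the three cases, assuming SKF_LL_OFF is -0x200000 as in <linux/filter.h> (illustrative only, not kernel code):

#define SKF_LL_OFF	(-0x200000)

static const char *choose_load_func(int k)
{
	if (k >= 0)
		return "sk_load_*_positive_offset";	/* offset known non-negative   */
	if (k >= SKF_LL_OFF)
		return "sk_load_*_negative_offset";	/* ancillary/link-layer offset */
	return "sk_load_*";				/* re-checked at run time      */
}
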
+ 1 - 1
arch/sparc/kernel/central.c

@@ -269,4 +269,4 @@ static int __init sunfire_init(void)
 	return 0;
 }
 
-subsys_initcall(sunfire_init);
+fs_initcall(sunfire_init);

+ 3 - 3
arch/sparc/mm/ultra.S

@@ -495,11 +495,11 @@ xcall_fetch_glob_regs:
 	stx		%o7, [%g1 + GR_SNAP_O7]
 	stx		%i7, [%g1 + GR_SNAP_I7]
 	/* Don't try this at home kids... */
-	rdpr		%cwp, %g2
-	sub		%g2, 1, %g7
+	rdpr		%cwp, %g3
+	sub		%g3, 1, %g7
 	wrpr		%g7, %cwp
 	mov		%i7, %g7
-	wrpr		%g2, %cwp
+	wrpr		%g3, %cwp
 	stx		%g7, [%g1 + GR_SNAP_RPC]
 	sethi		%hi(trap_block), %g7
 	or		%g7, %lo(trap_block), %g7

+ 1 - 1
arch/x86/Kconfig

@@ -81,7 +81,7 @@ config X86
 	select CLKEVT_I8253
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
-	select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC
+	select DCACHE_WORD_ACCESS
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)

+ 0 - 2
arch/x86/boot/compressed/relocs.c

@@ -403,13 +403,11 @@ static void print_absolute_symbols(void)
 	for (i = 0; i < ehdr.e_shnum; i++) {
 		struct section *sec = &secs[i];
 		char *sym_strtab;
-		Elf32_Sym *sh_symtab;
 		int j;
 
 		if (sec->shdr.sh_type != SHT_SYMTAB) {
 			continue;
 		}
-		sh_symtab = sec->symtab;
 		sym_strtab = sec->link->strtab;
 		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
 			Elf32_Sym *sym;

+ 1 - 2
arch/x86/ia32/ia32_aout.c

@@ -294,8 +294,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 
 	/* OK, This is the point of no return */
 	set_personality(PER_LINUX);
-	set_thread_flag(TIF_IA32);
-	current->mm->context.ia32_compat = 1;
+	set_personality_ia32(false);
 
 	setup_new_exec(bprm);
 

+ 33 - 0
arch/x86/include/asm/word-at-a-time.h

@@ -43,4 +43,37 @@ static inline unsigned long has_zero(unsigned long a)
 	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
 }
 
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+	unsigned long ret, dummy;
+
+	asm(
+		"1:\tmov %2,%0\n"
+		"2:\n"
+		".section .fixup,\"ax\"\n"
+		"3:\t"
+		"lea %2,%1\n\t"
+		"and %3,%1\n\t"
+		"mov (%1),%0\n\t"
+		"leal %2,%%ecx\n\t"
+		"andl %4,%%ecx\n\t"
+		"shll $3,%%ecx\n\t"
+		"shr %%cl,%0\n\t"
+		"jmp 2b\n"
+		".previous\n"
+		_ASM_EXTABLE(1b, 3b)
+		:"=&r" (ret),"=&c" (dummy)
+		:"m" (*(unsigned long *)addr),
+		 "i" (-sizeof(unsigned long)),
+		 "i" (sizeof(unsigned long)-1));
+	return ret;
+}
+
 #endif /* _ASM_WORD_AT_A_TIME_H */

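load_unaligned_zeropad() pairs naturally with has_zero() earlier in this header for word-at-a-time string scanning. A hedged sketch of a caller follows; it is illustrative only, and the kernel's real users (such as the dentry-name hashing code behind the DCACHE_WORD_ACCESS select above) are structured differently.

#include <linux/types.h>
#include <asm/word-at-a-time.h>

/* Sketch: strlen() that walks the string one word at a time. */
static size_t wordwise_strlen(const char *s)
{
	size_t len = 0;

	for (;;) {
		unsigned long v = load_unaligned_zeropad(s + len);

		if (has_zero(v)) {
			while (s[len])		/* finish the last word bytewise */
				len++;
			return len;
		}
		len += sizeof(unsigned long);
	}
}
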
+ 18 - 0
arch/x86/kernel/cpu/amd.c

@@ -580,6 +580,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}
 	}
 
+	/* re-enable TopologyExtensions if switched off by BIOS */
+	if ((c->x86 == 0x15) &&
+	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+		u64 val;
+
+		if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+			val |= 1ULL << 54;
+			wrmsrl_amd_safe(0xc0011005, val);
+			rdmsrl(0xc0011005, val);
+			if (val & (1ULL << 54)) {
+				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
+				  "disabled Topology Extensions Support\n");
+			}
+		}
+	}
+
 	cpu_detect_cache_sizes(c);
 
 	/* Multi core CPU? */
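
For reference (not spelled out in the patch): MSR 0xc0011005 is AMD's CPUID feature-override MSR, with the Fn8000_0001 EDX bits in [63:32]'s lower half counterpart [31:0] and the ECX bits in [63:32], so bit 54 is 32 + 22, i.e. the TopologyExtensions bit normally reported in CPUID Fn8000_0001 ECX[22]. Reading the MSR back checks that the write actually stuck before the feature flag is re-advertised.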

+ 1 - 8
arch/x86/kernel/kvm.c

@@ -79,7 +79,6 @@ struct kvm_task_sleep_node {
 	u32 token;
 	int cpu;
 	bool halted;
-	struct mm_struct *mm;
 };
 
 static struct kvm_task_sleep_head {
@@ -126,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token)
 
 	n.token = token;
 	n.cpu = smp_processor_id();
-	n.mm = current->active_mm;
 	n.halted = idle || preempt_count() > 1;
-	atomic_inc(&n.mm->mm_count);
 	init_waitqueue_head(&n.wq);
 	hlist_add_head(&n.link, &b->list);
 	spin_unlock(&b->lock);
@@ -161,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
 static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 {
 	hlist_del_init(&n->link);
-	if (!n->mm)
-		return;
-	mmdrop(n->mm);
 	if (n->halted)
 		smp_send_reschedule(n->cpu);
 	else if (waitqueue_active(&n->wq))
@@ -207,7 +201,7 @@ again:
 		 * async PF was not yet handled.
 		 * Add dummy entry for the token.
 		 */
-		n = kmalloc(sizeof(*n), GFP_ATOMIC);
+		n = kzalloc(sizeof(*n), GFP_ATOMIC);
 		if (!n) {
 			/*
 			 * Allocation failed! Busy wait while other cpu
@@ -219,7 +213,6 @@ again:
 		}
 		n->token = token;
 		n->cpu = smp_processor_id();
-		n->mm = NULL;
 		init_waitqueue_head(&n->wq);
 		hlist_add_head(&n->link, &b->list);
 	} else

+ 1 - 0
arch/x86/kernel/process_64.c

@@ -423,6 +423,7 @@ void set_personality_ia32(bool x32)
 		current_thread_info()->status |= TS_COMPAT;
 	}
 }
+EXPORT_SYMBOL_GPL(set_personality_ia32);
 
 unsigned long get_wchan(struct task_struct *p)
 {

+ 13 - 1
arch/x86/kernel/setup_percpu.c

@@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
 #endif
 	rc = -EINVAL;
 	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
-		const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
 		const size_t dyn_size = PERCPU_MODULE_RESERVE +
 			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
+		size_t atom_size;
 
+		/*
+		 * On 64bit, use PMD_SIZE for atom_size so that embedded
+		 * percpu areas are aligned to PMD.  This, in the future,
+		 * can also allow using PMD mappings in vmalloc area.  Use
+		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
+		 * and large vmalloc area allocs can easily fail.
+		 */
+#ifdef CONFIG_X86_64
+		atom_size = PMD_SIZE;
+#else
+		atom_size = PAGE_SIZE;
+#endif
 		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
 					    dyn_size, atom_size,
 					    pcpu_cpu_distance,

+ 1 - 0
arch/x86/kvm/x86.c

@@ -6581,6 +6581,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 		kvm_inject_page_fault(vcpu, &fault);
 	}
 	vcpu->arch.apf.halted = false;
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)

+ 1 - 1
arch/x86/platform/geode/net5501.c

@@ -63,7 +63,7 @@ static struct gpio_led net5501_leds[] = {
 		.name = "net5501:1",
 		.gpio = 6,
 		.default_trigger = "default-on",
-		.active_low = 1,
+		.active_low = 0,
 	},
 };
 

+ 39 - 3
arch/x86/xen/enlighten.c

@@ -63,6 +63,7 @@
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
 #include <asm/mwait.h>
+#include <asm/pci_x86.h>
 
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
@@ -809,9 +810,40 @@ static void xen_io_delay(void)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
+static unsigned long xen_set_apic_id(unsigned int x)
+{
+	WARN_ON(1);
+	return x;
+}
+static unsigned int xen_get_apic_id(unsigned long x)
+{
+	return ((x)>>24) & 0xFFu;
+}
 static u32 xen_apic_read(u32 reg)
 {
-	return 0;
+	struct xen_platform_op op = {
+		.cmd = XENPF_get_cpuinfo,
+		.interface_version = XENPF_INTERFACE_VERSION,
+		.u.pcpu_info.xen_cpuid = 0,
+	};
+	int ret = 0;
+
+	/* Shouldn't need this as APIC is turned off for PV, and we only
+	 * get called on the bootup processor. But just in case. */
+	if (!xen_initial_domain() || smp_processor_id())
+		return 0;
+
+	if (reg == APIC_LVR)
+		return 0x10;
+
+	if (reg != APIC_ID)
+		return 0;
+
+	ret = HYPERVISOR_dom0_op(&op);
+	if (ret)
+		return 0;
+
+	return op.u.pcpu_info.apic_id << 24;
 }
 
 static void xen_apic_write(u32 reg, u32 val)
@@ -849,6 +881,8 @@ static void set_xen_basic_apic_ops(void)
 	apic->icr_write = xen_apic_icr_write;
 	apic->wait_icr_idle = xen_apic_wait_icr_idle;
 	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
+	apic->set_apic_id = xen_set_apic_id;
+	apic->get_apic_id = xen_get_apic_id;
 }
 
 #endif
@@ -1365,8 +1399,10 @@ asmlinkage void __init xen_start_kernel(void)
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
 	}
-		
-
+#ifdef CONFIG_PCI
+	/* PCI BIOS service won't work from a PV guest. */
+	pci_probe &= ~PCI_PROBE_BIOS;
+#endif
 	xen_raw_console_write("about to get started...\n");
 
 	xen_setup_runstate_info(0);
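
The new accessors are consistent with each other: the local APIC ID register keeps the ID in bits 31:24, which is why xen_apic_read() returns the Xen-reported apic_id shifted left by 24 and xen_get_apic_id() recovers it with (x >> 24) & 0xFF. A quick round trip with a made-up value:

	unsigned long reg = (unsigned long)5 << 24;	/* as built by xen_apic_read() */
	unsigned int id  = (reg >> 24) & 0xFFu;		/* xen_get_apic_id() -> 5 */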

+ 6 - 1
arch/x86/xen/mmu.c

@@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
 	if (val & _PAGE_PRESENT) {
 		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+		unsigned long pfn = mfn_to_pfn(mfn);
+
 		pteval_t flags = val & PTE_FLAGS_MASK;
-		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+		if (unlikely(pfn == ~0))
+			val = flags & ~_PAGE_PRESENT;
+		else
+			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
 	}
 
 	return val;
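
The pfn == ~0 test covers the case where mfn_to_pfn() has no PFN for the machine frame (judging by the check, ~0 is its failure value); the old code shifted that sentinel straight into the PTE. Illustrative only:

	unsigned long pfn = ~0UL;	/* mfn_to_pfn() could not translate the mfn */
	/* old: val = ((pteval_t)pfn << PAGE_SHIFT) | flags;  -> a bogus but
	 *      "present" PTE pointing at a nonsense frame
	 * new: val = flags & ~_PAGE_PRESENT;                 -> simply not present */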

+ 1 - 1
drivers/acpi/power.c

@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
 	 * We know a device's inferred power state when all the resources
 	 * required for a given D-state are 'on'.
 	 */
-	for (i = ACPI_STATE_D0; i < ACPI_STATE_D3; i++) {
+	for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
 		list = &device->power.states[i].resources;
 		if (list->count < 1)
 			continue;

+ 7 - 10
drivers/acpi/scan.c

@@ -869,7 +869,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
 	/*
 	 * Enumerate supported power management states
 	 */
-	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
+	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
 		struct acpi_device_power_state *ps = &device->power.states[i];
 		char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
 
@@ -884,21 +884,18 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
 				acpi_bus_add_power_resource(ps->resources.handles[j]);
 		}
 
-		/* The exist of _PR3 indicates D3Cold support */
-		if (i == ACPI_STATE_D3) {
-			status = acpi_get_handle(device->handle, object_name, &handle);
-			if (ACPI_SUCCESS(status))
-				device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
-		}
-
 		/* Evaluate "_PSx" to see if we can do explicit sets */
 		object_name[2] = 'S';
 		status = acpi_get_handle(device->handle, object_name, &handle);
 		if (ACPI_SUCCESS(status))
 			ps->flags.explicit_set = 1;
 
-		/* State is valid if we have some power control */
-		if (ps->resources.count || ps->flags.explicit_set)
+		/*
+		 * State is valid if there are means to put the device into it.
+		 * D3hot is only valid if _PR3 present.
+		 */
+		if (ps->resources.count ||
+		    (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT))
 			ps->flags.valid = 1;
 
 		ps->power = -1;	/* Unknown - driver assigned */

+ 2 - 0
drivers/ata/ahci.c

@@ -394,6 +394,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	  .driver_data = board_ahci_yes_fbs },			/* 88se9128 */
 	{ PCI_DEVICE(0x1b4b, 0x9125),
 	  .driver_data = board_ahci_yes_fbs },			/* 88se9125 */
+	{ PCI_DEVICE(0x1b4b, 0x917a),
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 */
 	{ PCI_DEVICE(0x1b4b, 0x91a3),
 	  .driver_data = board_ahci_yes_fbs },
 

+ 1 - 0
drivers/ata/ahci_platform.c

@@ -280,6 +280,7 @@ static struct dev_pm_ops ahci_pm_ops = {
 
 static const struct of_device_id ahci_of_match[] = {
 	{ .compatible = "calxeda,hb-ahci", },
+	{ .compatible = "snps,spear-ahci", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, ahci_of_match);

+ 1 - 1
drivers/ata/libata-core.c

@@ -95,7 +95,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
 static void ata_dev_xfermask(struct ata_device *dev);
 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
 
-atomic_t ata_print_id = ATOMIC_INIT(1);
+atomic_t ata_print_id = ATOMIC_INIT(0);
 
 struct ata_force_param {
 	const char	*name;

+ 2 - 1
drivers/ata/libata-eh.c

@@ -3501,7 +3501,8 @@ static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg
 	u64 now = get_jiffies_64();
 	int *trials = void_arg;
 
-	if (ent->timestamp < now - min(now, interval))
+	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
+	    (ent->timestamp < now - min(now, interval)))
 		return -1;
 
 	(*trials)++;

+ 22 - 16
drivers/ata/libata-scsi.c

@@ -3399,7 +3399,8 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
 		 */
 		shost->max_host_blocked = 1;
 
-		rc = scsi_add_host(ap->scsi_host, &ap->tdev);
+		rc = scsi_add_host_with_dma(ap->scsi_host,
+						&ap->tdev, ap->host->dev);
 		if (rc)
 			goto err_add;
 	}
@@ -3838,18 +3839,25 @@ void ata_sas_port_stop(struct ata_port *ap)
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_stop);
 
-int ata_sas_async_port_init(struct ata_port *ap)
+/**
+ * ata_sas_async_probe - simply schedule probing and return
+ * @ap: Port to probe
+ *
+ * For batch scheduling of probe for sas attached ata devices, assumes
+ * the port has already been through ata_sas_port_init()
+ */
+void ata_sas_async_probe(struct ata_port *ap)
 {
-	int rc = ap->ops->port_start(ap);
-
-	if (!rc) {
-		ap->print_id = atomic_inc_return(&ata_print_id);
-		__ata_port_probe(ap);
-	}
+	__ata_port_probe(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_async_probe);
 
-	return rc;
+int ata_sas_sync_probe(struct ata_port *ap)
+{
+	return ata_port_probe(ap);
 }
-EXPORT_SYMBOL_GPL(ata_sas_async_port_init);
+EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
+
 
 /**
  *	ata_sas_port_init - Initialize a SATA device
@@ -3866,12 +3874,10 @@ int ata_sas_port_init(struct ata_port *ap)
 {
 	int rc = ap->ops->port_start(ap);
 
-	if (!rc) {
-		ap->print_id = atomic_inc_return(&ata_print_id);
-		rc = ata_port_probe(ap);
-	}
-
-	return rc;
+	if (rc)
+		return rc;
+	ap->print_id = atomic_inc_return(&ata_print_id);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_init);
 

+ 1 - 3
drivers/ata/pata_arasan_cf.c

@@ -943,9 +943,9 @@ static int arasan_cf_resume(struct device *dev)
 
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
-#endif
 
 static struct platform_driver arasan_cf_driver = {
 	.probe		= arasan_cf_probe,
@@ -953,9 +953,7 @@ static struct platform_driver arasan_cf_driver = {
 	.driver		= {
 		.name	= DRIVER_NAME,
 		.owner	= THIS_MODULE,
-#ifdef CONFIG_PM
 		.pm	= &arasan_cf_pm_ops,
-#endif
 	},
 };
 

+ 3 - 1
drivers/base/regmap/regmap.c

@@ -775,9 +775,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 			map->format.parse_val(val + i);
 	} else {
 		for (i = 0; i < val_count; i++) {
-			ret = regmap_read(map, reg + i, val + (i * val_bytes));
+			unsigned int ival;
+			ret = regmap_read(map, reg + i, &ival);
 			if (ret != 0)
 				return ret;
+			memcpy(val + (i * val_bytes), &ival, val_bytes);
 		}
 	}
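
The underlying problem is that regmap_read() always stores a full unsigned int through its val argument, so writing it straight into a buffer of val_bytes-sized elements clobbers neighbouring entries whenever val_bytes is smaller than sizeof(unsigned int). A hypothetical helper mirroring the fixed loop for an 8-bit register map:

	/* Read 'count' consecutive 8-bit registers into an 8-bit buffer
	 * without overrunning it (the val_bytes == 1 case of the loop above). */
	static int read_byte_regs(struct regmap *map, unsigned int reg,
				  u8 *buf, size_t count)
	{
		size_t i;

		for (i = 0; i < count; i++) {
			unsigned int ival;
			int ret = regmap_read(map, reg + i, &ival);

			if (ret != 0)
				return ret;
			buf[i] = ival;	/* keep only the register's byte */
		}
		return 0;
	}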
 

+ 4 - 0
drivers/bluetooth/ath3k.c

@@ -75,6 +75,8 @@ static struct usb_device_id ath3k_table[] = {
 	{ USB_DEVICE(0x0CF3, 0x311D) },
 	{ USB_DEVICE(0x13d3, 0x3375) },
 	{ USB_DEVICE(0x04CA, 0x3005) },
+	{ USB_DEVICE(0x13d3, 0x3362) },
+	{ USB_DEVICE(0x0CF3, 0xE004) },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE02C) },
@@ -94,6 +96,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
 	{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 
 	{ }	/* Terminating entry */
 };

+ 6 - 0
drivers/bluetooth/btusb.c

@@ -101,12 +101,16 @@ static struct usb_device_id btusb_table[] = {
 	{ USB_DEVICE(0x0c10, 0x0000) },
 
 	/* Broadcom BCM20702A0 */
+	{ USB_DEVICE(0x0489, 0xe042) },
 	{ USB_DEVICE(0x0a5c, 0x21e3) },
 	{ USB_DEVICE(0x0a5c, 0x21e6) },
 	{ USB_DEVICE(0x0a5c, 0x21e8) },
 	{ USB_DEVICE(0x0a5c, 0x21f3) },
 	{ USB_DEVICE(0x413c, 0x8197) },
 
+	/* Foxconn - Hon Hai */
+	{ USB_DEVICE(0x0489, 0xe033) },
+
 	{ }	/* Terminating entry */
 };
 
@@ -133,6 +137,8 @@ static struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },

+ 196 - 0
drivers/firmware/efivars.c

@@ -191,6 +191,190 @@ utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
 	}
 }
 
+static bool
+validate_device_path(struct efi_variable *var, int match, u8 *buffer,
+		     unsigned long len)
+{
+	struct efi_generic_dev_path *node;
+	int offset = 0;
+
+	node = (struct efi_generic_dev_path *)buffer;
+
+	if (len < sizeof(*node))
+		return false;
+
+	while (offset <= len - sizeof(*node) &&
+	       node->length >= sizeof(*node) &&
+		node->length <= len - offset) {
+		offset += node->length;
+
+		if ((node->type == EFI_DEV_END_PATH ||
+		     node->type == EFI_DEV_END_PATH2) &&
+		    node->sub_type == EFI_DEV_END_ENTIRE)
+			return true;
+
+		node = (struct efi_generic_dev_path *)(buffer + offset);
+	}
+
+	/*
+	 * If we're here then either node->length pointed past the end
+	 * of the buffer or we reached the end of the buffer without
+	 * finding a device path end node.
+	 */
+	return false;
+}
+
+static bool
+validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
+		    unsigned long len)
+{
+	/* An array of 16-bit integers */
+	if ((len % 2) != 0)
+		return false;
+
+	return true;
+}
+
+static bool
+validate_load_option(struct efi_variable *var, int match, u8 *buffer,
+		     unsigned long len)
+{
+	u16 filepathlength;
+	int i, desclength = 0, namelen;
+
+	namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
+
+	/* Either "Boot" or "Driver" followed by four digits of hex */
+	for (i = match; i < match+4; i++) {
+		if (var->VariableName[i] > 127 ||
+		    hex_to_bin(var->VariableName[i] & 0xff) < 0)
+			return true;
+	}
+
+	/* Reject it if there's 4 digits of hex and then further content */
+	if (namelen > match + 4)
+		return false;
+
+	/* A valid entry must be at least 8 bytes */
+	if (len < 8)
+		return false;
+
+	filepathlength = buffer[4] | buffer[5] << 8;
+
+	/*
+	 * There's no stored length for the description, so it has to be
+	 * found by hand
+	 */
+	desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+
+	/* Each boot entry must have a descriptor */
+	if (!desclength)
+		return false;
+
+	/*
+	 * If the sum of the length of the description, the claimed filepath
+	 * length and the original header are greater than the length of the
+	 * variable, it's malformed
+	 */
+	if ((desclength + filepathlength + 6) > len)
+		return false;
+
+	/*
+	 * And, finally, check the filepath
+	 */
+	return validate_device_path(var, match, buffer + desclength + 6,
+				    filepathlength);
+}
+
+static bool
+validate_uint16(struct efi_variable *var, int match, u8 *buffer,
+		unsigned long len)
+{
+	/* A single 16-bit integer */
+	if (len != 2)
+		return false;
+
+	return true;
+}
+
+static bool
+validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
+		      unsigned long len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (buffer[i] > 127)
+			return false;
+
+		if (buffer[i] == 0)
+			return true;
+	}
+
+	return false;
+}
+
+struct variable_validate {
+	char *name;
+	bool (*validate)(struct efi_variable *var, int match, u8 *data,
+			 unsigned long len);
+};
+
+static const struct variable_validate variable_validate[] = {
+	{ "BootNext", validate_uint16 },
+	{ "BootOrder", validate_boot_order },
+	{ "DriverOrder", validate_boot_order },
+	{ "Boot*", validate_load_option },
+	{ "Driver*", validate_load_option },
+	{ "ConIn", validate_device_path },
+	{ "ConInDev", validate_device_path },
+	{ "ConOut", validate_device_path },
+	{ "ConOutDev", validate_device_path },
+	{ "ErrOut", validate_device_path },
+	{ "ErrOutDev", validate_device_path },
+	{ "Timeout", validate_uint16 },
+	{ "Lang", validate_ascii_string },
+	{ "PlatformLang", validate_ascii_string },
+	{ "", NULL },
+};
+
+static bool
+validate_var(struct efi_variable *var, u8 *data, unsigned long len)
+{
+	int i;
+	u16 *unicode_name = var->VariableName;
+
+	for (i = 0; variable_validate[i].validate != NULL; i++) {
+		const char *name = variable_validate[i].name;
+		int match;
+
+		for (match = 0; ; match++) {
+			char c = name[match];
+			u16 u = unicode_name[match];
+
+			/* All special variables are plain ascii */
+			if (u > 127)
+				return true;
+
+			/* Wildcard in the matching name means we've matched */
+			if (c == '*')
+				return variable_validate[i].validate(var,
+							     match, data, len);
+
+			/* Case sensitive match */
+			if (c != u)
+				break;
+
+			/* Reached the end of the string while matching */
+			if (!c)
+				return variable_validate[i].validate(var,
+							     match, data, len);
+		}
+	}
+
+	return true;
+}
+
 static efi_status_t
 get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
 {
@@ -324,6 +508,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
 		return -EINVAL;
 	}
 
+	if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+	    validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
+		printk(KERN_ERR "efivars: Malformed variable content\n");
+		return -EINVAL;
+	}
+
 	spin_lock(&efivars->lock);
 	status = efivars->ops->set_variable(new_var->VariableName,
 					    &new_var->VendorGuid,
@@ -626,6 +816,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
+	if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+	    validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
+		printk(KERN_ERR "efivars: Malformed variable content\n");
+		return -EINVAL;
+	}
+
 	spin_lock(&efivars->lock);
 
 	/*

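A worked example of the name matching in validate_var() above: writing a variable called Boot0001 walks variable_validate[] until the "Boot*" entry; the '*' is reached at match == 4, so validate_load_option() runs with match pointing at the first of the trailing digits. From there it rejects names with anything after those four characters, payloads shorter than 8 bytes, description/file-path lengths that exceed the buffer, and file paths that validate_device_path() cannot walk to an end-of-path node.
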
+ 3 - 0
drivers/gpu/drm/i915/i915_debugfs.c

@@ -1224,6 +1224,9 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	unsigned long temp, chipset, gfx;
 	int ret;
 
+	if (!IS_GEN5(dev))
+		return -ENODEV;
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;

+ 10 - 5
drivers/gpu/drm/i915/i915_dma.c

@@ -1701,6 +1701,9 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 	unsigned long diffms;
 	u32 count;
 
+	if (dev_priv->info->gen != 5)
+		return;
+
 	getrawmonotonic(&now);
 	diff1 = timespec_sub(now, dev_priv->last_time2);
 
@@ -2121,12 +2124,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
 
-	spin_lock(&mchdev_lock);
-	i915_mch_dev = dev_priv;
-	dev_priv->mchdev_lock = &mchdev_lock;
-	spin_unlock(&mchdev_lock);
+	if (IS_GEN5(dev)) {
+		spin_lock(&mchdev_lock);
+		i915_mch_dev = dev_priv;
+		dev_priv->mchdev_lock = &mchdev_lock;
+		spin_unlock(&mchdev_lock);
 
-	ips_ping_for_i915_load();
+		ips_ping_for_i915_load();
+	}
 
 	return 0;
 

+ 5 - 4
drivers/gpu/drm/i915/intel_display.c

@@ -7072,9 +7072,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int dpll_reg = DPLL(pipe);
-	int dpll = I915_READ(dpll_reg);
 
 	if (HAS_PCH_SPLIT(dev))
 		return;
@@ -7087,10 +7084,15 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 	 * the manual case.
 	 */
 	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+		int pipe = intel_crtc->pipe;
+		int dpll_reg = DPLL(pipe);
+		u32 dpll;
+
 		DRM_DEBUG_DRIVER("downclocking LVDS\n");
 
 		assert_panel_unlocked(dev_priv, pipe);
 
+		dpll = I915_READ(dpll_reg);
 		dpll |= DISPLAY_RATE_SELECT_FPA1;
 		I915_WRITE(dpll_reg, dpll);
 		intel_wait_for_vblank(dev, pipe);
@@ -7098,7 +7100,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
 			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
 	}
-
 }
 
 /**

+ 1 - 1
drivers/gpu/drm/i915/intel_hdmi.c

@@ -136,7 +136,7 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
 
 	val &= ~VIDEO_DIP_SELECT_MASK;
 
-	I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
+	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
 
 	for (i = 0; i < len; i += 4) {
 		I915_WRITE(VIDEO_DIP_DATA, *data);

+ 2 - 2
drivers/gpu/drm/i915/intel_lvds.c

@@ -750,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
 		.ident = "Hewlett-Packard t5745",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-			DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
 		},
 	},
 	{
@@ -758,7 +758,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
 		.ident = "Hewlett-Packard st5747",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-			DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
 		},
 	},
 	{

+ 6 - 3
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -398,10 +398,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 			return ret;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
-		I915_WRITE(INSTPM,
-			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
 
+	if (IS_GEN6(dev)) {
 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
 		 * "If this bit is set, STCunit will have LRA as replacement
 		 *  policy. [...] This bit must be reset.  LRA replacement
@@ -411,6 +409,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 			   CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
 	}
 
+	if (INTEL_INFO(dev)->gen >= 6) {
+		I915_WRITE(INSTPM,
+			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+	}
+
 	return ret;
 }
 

+ 6 - 0
drivers/gpu/drm/i915/intel_sdvo.c

@@ -1220,8 +1220,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
 
 static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
 {
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	u8 response[2];
 
+	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+	 * on the line. */
+	if (IS_I945G(dev) || IS_I945GM(dev))
+		return false;
+
 	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
 				    &response, 2) && response[0];
 }

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_acpi.c

@@ -270,7 +270,7 @@ static bool nouveau_dsm_detect(void)
 	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
 	struct pci_dev *pdev = NULL;
 	int has_dsm = 0;
-	int has_optimus;
+	int has_optimus = 0;
 	int vga_count = 0;
 	bool guid_valid;
 	int retval;

+ 7 - 3
drivers/gpu/drm/nouveau/nouveau_bios.c

@@ -6156,10 +6156,14 @@ dcb_fake_connectors(struct nvbios *bios)
 
 	/* heuristic: if we ever get a non-zero connector field, assume
 	 * that all the indices are valid and we don't need fake them.
+	 *
+	 * and, as usual, a blacklist of boards with bad bios data..
 	 */
-	for (i = 0; i < dcbt->entries; i++) {
-		if (dcbt->entry[i].connector)
-			return;
+	if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) {
+		for (i = 0; i < dcbt->entries; i++) {
+			if (dcbt->entry[i].connector)
+				return;
+		}
 	}
 
 	/* no useful connector info available, we need to make it up

+ 3 - 1
drivers/gpu/drm/nouveau/nouveau_hdmi.c

@@ -32,7 +32,9 @@ static bool
 hdmi_sor(struct drm_encoder *encoder)
 {
 	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
-	if (dev_priv->chipset < 0xa3)
+	if (dev_priv->chipset <  0xa3 ||
+	    dev_priv->chipset == 0xaa ||
+	    dev_priv->chipset == 0xac)
 		return false;
 	return true;
 }

+ 21 - 178
drivers/gpu/drm/nouveau/nouveau_i2c.c

@@ -29,10 +29,6 @@
 #include "nouveau_i2c.h"
 #include "nouveau_hw.h"
 
-#define T_TIMEOUT  2200000
-#define T_RISEFALL 1000
-#define T_HOLD     5000
-
 static void
 i2c_drive_scl(void *data, int state)
 {
@@ -113,175 +109,6 @@ i2c_sense_sda(void *data)
 	return 0;
 }
 
-static void
-i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
-{
-	udelay((nsec + 500) / 1000);
-}
-
-static bool
-i2c_raise_scl(struct nouveau_i2c_chan *port)
-{
-	u32 timeout = T_TIMEOUT / T_RISEFALL;
-
-	i2c_drive_scl(port, 1);
-	do {
-		i2c_delay(port, T_RISEFALL);
-	} while (!i2c_sense_scl(port) && --timeout);
-
-	return timeout != 0;
-}
-
-static int
-i2c_start(struct nouveau_i2c_chan *port)
-{
-	int ret = 0;
-
-	port->state  = i2c_sense_scl(port);
-	port->state |= i2c_sense_sda(port) << 1;
-	if (port->state != 3) {
-		i2c_drive_scl(port, 0);
-		i2c_drive_sda(port, 1);
-		if (!i2c_raise_scl(port))
-			ret = -EBUSY;
-	}
-
-	i2c_drive_sda(port, 0);
-	i2c_delay(port, T_HOLD);
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
-	return ret;
-}
-
-static void
-i2c_stop(struct nouveau_i2c_chan *port)
-{
-	i2c_drive_scl(port, 0);
-	i2c_drive_sda(port, 0);
-	i2c_delay(port, T_RISEFALL);
-
-	i2c_drive_scl(port, 1);
-	i2c_delay(port, T_HOLD);
-	i2c_drive_sda(port, 1);
-	i2c_delay(port, T_HOLD);
-}
-
-static int
-i2c_bitw(struct nouveau_i2c_chan *port, int sda)
-{
-	i2c_drive_sda(port, sda);
-	i2c_delay(port, T_RISEFALL);
-
-	if (!i2c_raise_scl(port))
-		return -ETIMEDOUT;
-	i2c_delay(port, T_HOLD);
-
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
-	return 0;
-}
-
-static int
-i2c_bitr(struct nouveau_i2c_chan *port)
-{
-	int sda;
-
-	i2c_drive_sda(port, 1);
-	i2c_delay(port, T_RISEFALL);
-
-	if (!i2c_raise_scl(port))
-		return -ETIMEDOUT;
-	i2c_delay(port, T_HOLD);
-
-	sda = i2c_sense_sda(port);
-
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
-	return sda;
-}
-
-static int
-i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
-{
-	int i, bit;
-
-	*byte = 0;
-	for (i = 7; i >= 0; i--) {
-		bit = i2c_bitr(port);
-		if (bit < 0)
-			return bit;
-		*byte |= bit << i;
-	}
-
-	return i2c_bitw(port, last ? 1 : 0);
-}
-
-static int
-i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
-{
-	int i, ret;
-	for (i = 7; i >= 0; i--) {
-		ret = i2c_bitw(port, !!(byte & (1 << i)));
-		if (ret < 0)
-			return ret;
-	}
-
-	ret = i2c_bitr(port);
-	if (ret == 1) /* nack */
-		ret = -EIO;
-	return ret;
-}
-
-static int
-i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
-{
-	u32 addr = msg->addr << 1;
-	if (msg->flags & I2C_M_RD)
-		addr |= 1;
-	return i2c_put_byte(port, addr);
-}
-
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
-	struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
-	struct i2c_msg *msg = msgs;
-	int ret = 0, mcnt = num;
-
-	while (!ret && mcnt--) {
-		u8 remaining = msg->len;
-		u8 *ptr = msg->buf;
-
-		ret = i2c_start(port);
-		if (ret == 0)
-			ret = i2c_addr(port, msg);
-
-		if (msg->flags & I2C_M_RD) {
-			while (!ret && remaining--)
-				ret = i2c_get_byte(port, ptr++, !remaining);
-		} else {
-			while (!ret && remaining--)
-				ret = i2c_put_byte(port, *ptr++);
-		}
-
-		msg++;
-	}
-
-	i2c_stop(port);
-	return (ret < 0) ? ret : num;
-}
-
-static u32
-i2c_bit_func(struct i2c_adapter *adap)
-{
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-const struct i2c_algorithm nouveau_i2c_bit_algo = {
-	.master_xfer = i2c_bit_xfer,
-	.functionality = i2c_bit_func
-};
-
 static const uint32_t nv50_i2c_port[] = {
 	0x00e138, 0x00e150, 0x00e168, 0x00e180,
 	0x00e254, 0x00e274, 0x00e764, 0x00e780,
@@ -384,12 +211,10 @@ nouveau_i2c_init(struct drm_device *dev)
 		case 0: /* NV04:NV50 */
 			port->drive = entry[0];
 			port->sense = entry[1];
-			port->adapter.algo = &nouveau_i2c_bit_algo;
 			break;
 		case 4: /* NV4E */
 			port->drive = 0x600800 + entry[1];
 			port->sense = port->drive;
-			port->adapter.algo = &nouveau_i2c_bit_algo;
 			break;
 		case 5: /* NV50- */
 			port->drive = entry[0] & 0x0f;
@@ -402,7 +227,6 @@ nouveau_i2c_init(struct drm_device *dev)
 				port->drive = 0x00d014 + (port->drive * 0x20);
 				port->sense = port->drive;
 			}
-			port->adapter.algo = &nouveau_i2c_bit_algo;
 			break;
 		case 6: /* NV50- DP AUX */
 			port->drive = entry[0];
@@ -413,7 +237,7 @@ nouveau_i2c_init(struct drm_device *dev)
 			break;
 		}
 
-		if (!port->adapter.algo) {
+		if (!port->adapter.algo && !port->drive) {
 			NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
 				 i, port->type, port->drive, port->sense);
 			kfree(port);
@@ -429,7 +253,26 @@ nouveau_i2c_init(struct drm_device *dev)
 		port->dcb = ROM32(entry[0]);
 		i2c_set_adapdata(&port->adapter, i2c);
 
-		ret = i2c_add_adapter(&port->adapter);
+		if (port->adapter.algo != &nouveau_dp_i2c_algo) {
+			port->adapter.algo_data = &port->bit;
+			port->bit.udelay = 10;
+			port->bit.timeout = usecs_to_jiffies(2200);
+			port->bit.data = port;
+			port->bit.setsda = i2c_drive_sda;
+			port->bit.setscl = i2c_drive_scl;
+			port->bit.getsda = i2c_sense_sda;
+			port->bit.getscl = i2c_sense_scl;
+
+			i2c_drive_scl(port, 0);
+			i2c_drive_sda(port, 1);
+			i2c_drive_scl(port, 1);
+
+			ret = i2c_bit_add_bus(&port->adapter);
+		} else {
+			port->adapter.algo = &nouveau_dp_i2c_algo;
+			ret = i2c_add_adapter(&port->adapter);
+		}
+
 		if (ret) {
 			NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
 			kfree(port);

+ 1 - 0
drivers/gpu/drm/nouveau/nouveau_i2c.h

@@ -34,6 +34,7 @@
 struct nouveau_i2c_chan {
 	struct i2c_adapter adapter;
 	struct drm_device *dev;
+	struct i2c_algo_bit_data bit;
 	struct list_head head;
 	u8  index;
 	u8  type;

+ 1 - 1
drivers/gpu/drm/nouveau/nv10_gpio.c

@@ -65,7 +65,7 @@ nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
 	if (line < 10) {
 		line = (line - 2) * 4;
 		reg  = NV_PCRTC_GPIO_EXT;
-		mask = 0x00000003 << ((line - 2) * 4);
+		mask = 0x00000003;
 		data = (dir << 1) | out;
 	} else
 	if (line < 14) {

+ 5 - 0
drivers/gpu/drm/nouveau/nvc0_fb.c

@@ -54,6 +54,11 @@ nvc0_mfb_isr(struct drm_device *dev)
 			nvc0_mfb_subp_isr(dev, unit, subp);
 		units &= ~(1 << unit);
 	}
+
+	/* we do something horribly wrong and upset PMFB a lot, so mask off
+	 * interrupts from it after the first one until it's fixed
+	 */
+	nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
 }
 
 static void

+ 2 - 2
drivers/gpu/drm/radeon/radeon_device.c

@@ -241,8 +241,8 @@ int radeon_wb_init(struct radeon_device *rdev)
 				rdev->wb.use_event = true;
 		}
 	}
-	/* always use writeback/events on NI */
-	if (ASIC_IS_DCE5(rdev)) {
+	/* always use writeback/events on NI, APUs */
+	if (rdev->family >= CHIP_PALM) {
 		rdev->wb.enabled = true;
 		rdev->wb.use_event = true;
 	}

+ 5 - 1
drivers/hwmon/coretemp.c

@@ -52,7 +52,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
 MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 
 #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES		16	/* Number of Real cores per cpu */
+#define NUM_REAL_CORES		32	/* Number of Real cores per cpu */
 #define CORETEMP_NAME_LENGTH	17	/* String Length of attrs */
 #define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
 #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
@@ -709,6 +709,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
 
 	indx = TO_ATTR_NO(cpu);
 
+	/* The core id is too big, just return */
+	if (indx > MAX_CORE_DATA - 1)
+		return;
+
 	if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
 		coretemp_remove_core(pdata, &pdev->dev, indx);
 

Some files were not shown because too many files changed in this diff