
Merge branch 'sh/stable-updates'

Paul Mundt committed 15 years ago
commit 94a46d3cde
100 changed files with 1764 additions and 787 deletions
  1. 2 0
      Documentation/filesystems/00-INDEX
  2. 6 5
      Documentation/filesystems/ceph.txt
  3. 54 0
      Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
  4. 12 2
      MAINTAINERS
  5. 1 1
      Makefile
  6. 1 37
      arch/arm/include/asm/cacheflush.h
  7. 1 0
      arch/arm/include/asm/clkdev.h
  8. 1 0
      arch/arm/include/asm/irq.h
  9. 75 0
      arch/arm/include/asm/outercache.h
  10. 10 6
      arch/arm/include/asm/system.h
  11. 9 1
      arch/arm/kernel/kprobes.c
  12. 2 2
      arch/arm/lib/memmove.S
  13. 13 0
      arch/arm/mm/Kconfig
  14. 10 0
      arch/arm/mm/cache-l2x0.c
  15. 1 1
      arch/arm/vfp/vfpmodule.c
  16. 1 1
      arch/cris/arch-v32/drivers/pci/bios.c
  17. 2 4
      arch/frv/mb93090-mb00/pci-frv.c
  18. 0 3
      arch/microblaze/Kconfig
  19. 3 1
      arch/microblaze/Makefile
  20. 1 5
      arch/microblaze/boot/Makefile
  21. 0 1
      arch/microblaze/include/asm/processor.h
  22. 0 49
      arch/microblaze/include/asm/segment.h
  23. 4 1
      arch/microblaze/include/asm/thread_info.h
  24. 2 1
      arch/microblaze/include/asm/tlbflush.h
  25. 244 203
      arch/microblaze/include/asm/uaccess.h
  26. 1 1
      arch/microblaze/kernel/dma.c
  27. 9 3
      arch/microblaze/kernel/head.S
  28. 48 64
      arch/microblaze/kernel/hw_exception_handler.S
  29. 13 2
      arch/microblaze/kernel/misc.S
  30. 6 4
      arch/microblaze/kernel/process.c
  31. 15 9
      arch/microblaze/kernel/setup.c
  32. 2 4
      arch/microblaze/kernel/traps.c
  33. 1 2
      arch/microblaze/lib/Makefile
  34. 5 1
      arch/microblaze/lib/fastcopy.S
  35. 1 1
      arch/microblaze/lib/memcpy.c
  36. 8 7
      arch/microblaze/lib/memset.c
  37. 0 48
      arch/microblaze/lib/uaccess.c
  38. 31 14
      arch/microblaze/lib/uaccess_old.S
  39. 12 12
      arch/microblaze/mm/fault.c
  40. 0 9
      arch/microblaze/mm/init.c
  41. 1 1
      arch/microblaze/mm/pgtable.c
  42. 2 0
      arch/powerpc/include/asm/asm-compat.h
  43. 26 0
      arch/powerpc/kernel/misc.S
  44. 2 0
      arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
  45. 3 0
      arch/sh/kernel/return_address.c
  46. 28 0
      arch/sh/mm/tlb-pteaex.c
  47. 19 0
      arch/sh/mm/tlb-sh3.c
  48. 28 0
      arch/sh/mm/tlb-sh4.c
  49. 0 28
      arch/sh/mm/tlbflush_32.c
  50. 16 12
      arch/sparc/configs/sparc64_defconfig
  51. 2 2
      arch/sparc/include/asm/stat.h
  52. 75 0
      arch/sparc/kernel/helpers.S
  53. 1 1
      arch/sparc/kernel/perf_event.c
  54. 4 0
      arch/sparc/kernel/ptrace_32.c
  55. 4 0
      arch/sparc/kernel/ptrace_64.c
  56. 2 2
      arch/sparc/kernel/sysfs.c
  57. 4 4
      arch/sparc/kernel/us2e_cpufreq.c
  58. 4 4
      arch/sparc/kernel/us3_cpufreq.c
  59. 1 1
      arch/sparc/mm/init_64.c
  60. 3 3
      arch/x86/include/asm/fixmap.h
  61. 1 0
      arch/x86/include/asm/hw_irq.h
  62. 2 0
      arch/x86/include/asm/msr-index.h
  63. 8 0
      arch/x86/kernel/apic/io_apic.c
  64. 44 10
      arch/x86/kernel/cpu/perf_event.c
  65. 47 33
      arch/x86/kernel/cpu/perf_event_amd.c
  66. 5 0
      arch/x86/kernel/dumpstack.h
  67. 3 1
      arch/x86/kernel/head32.c
  68. 2 1
      arch/x86/kernel/head64.c
  69. 22 0
      arch/x86/kernel/irqinit.c
  70. 1 1
      arch/x86/kernel/kgdb.c
  71. 24 8
      arch/x86/kernel/process.c
  72. 6 4
      arch/x86/kernel/setup.c
  73. 3 3
      arch/x86/kernel/smpboot.c
  74. 1 1
      arch/x86/kernel/vmlinux.lds.S
  75. 26 6
      arch/x86/mm/init.c
  76. 18 4
      arch/x86/pci/acpi.c
  77. 0 5
      arch/x86/pci/i386.c
  78. 4 0
      drivers/ata/pata_via.c
  79. 31 0
      drivers/base/power/main.c
  80. 2 0
      drivers/char/tty_io.c
  81. 40 63
      drivers/firewire/core-device.c
  82. 3 2
      drivers/firewire/core-iso.c
  83. 4 0
      drivers/firewire/ohci.c
  84. 1 0
      drivers/gpu/drm/drm_crtc_helper.c
  85. 0 9
      drivers/gpu/drm/drm_edid.c
  86. 2 0
      drivers/gpu/drm/drm_fb_helper.c
  87. 9 7
      drivers/gpu/drm/drm_fops.c
  88. 1 1
      drivers/gpu/drm/nouveau/Makefile
  89. 26 2
      drivers/gpu/drm/nouveau/nouveau_bios.c
  90. 2 1
      drivers/gpu/drm/nouveau/nouveau_bios.h
  91. 1 2
      drivers/gpu/drm/nouveau/nouveau_bo.c
  92. 1 1
      drivers/gpu/drm/nouveau/nouveau_connector.c
  93. 5 0
      drivers/gpu/drm/nouveau/nouveau_dma.c
  94. 10 0
      drivers/gpu/drm/nouveau/nouveau_drv.c
  95. 6 0
      drivers/gpu/drm/nouveau/nouveau_drv.h
  96. 561 48
      drivers/gpu/drm/nouveau/nouveau_irq.c
  97. 2 3
      drivers/gpu/drm/nouveau/nouveau_state.c
  98. 3 3
      drivers/gpu/drm/nouveau/nv04_crtc.c
  99. 3 3
      drivers/gpu/drm/nouveau/nv04_fbcon.c
  100. 2 2
      drivers/gpu/drm/nouveau/nv50_display.c

+ 2 - 0
Documentation/filesystems/00-INDEX

@@ -16,6 +16,8 @@ befs.txt
 	- information about the BeOS filesystem for Linux.
 bfs.txt
 	- info for the SCO UnixWare Boot Filesystem (BFS).
+ceph.txt
+	- info for the Ceph Distributed File System
 cifs.txt
 	- description of the CIFS filesystem.
 coda.txt

+ 6 - 5
Documentation/filesystems/ceph.txt

@@ -8,7 +8,7 @@ Basic features include:
 
  * POSIX semantics
  * Seamless scaling from 1 to many thousands of nodes
- * High availability and reliability.  No single points of failure.
+ * High availability and reliability.  No single point of failure.
  * N-way replication of data across storage nodes
  * Fast recovery from node failures
  * Automatic rebalancing of data on node addition/removal
@@ -94,7 +94,7 @@ Mount Options
 
   wsize=X
 	Specify the maximum write size in bytes.  By default there is no
-	maximu.  Ceph will normally size writes based on the file stripe
+	maximum.  Ceph will normally size writes based on the file stripe
 	size.
 
   rsize=X
@@ -115,7 +115,7 @@ Mount Options
 	number of entries in that directory.
 
   nocrc
-	Disable CRC32C calculation for data writes.  If set, the OSD
+	Disable CRC32C calculation for data writes.  If set, the storage node
 	must rely on TCP's error correction to detect data corruption
 	in the data payload.
 
@@ -133,7 +133,8 @@ For more information on Ceph, see the home page at
 	http://ceph.newdream.net/
 
 The Linux kernel client source tree is available at
-	git://ceph.newdream.net/linux-ceph-client.git
+	git://ceph.newdream.net/git/ceph-client.git
+	git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 
 and the source for the full system is at
-	git://ceph.newdream.net/ceph.git
+	git://ceph.newdream.net/git/ceph.git

+ 54 - 0
Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt

@@ -21,6 +21,15 @@ Required properties:
 - fsl,qe-num-snums: define how many serial number(SNUM) the QE can use for the
   threads.
 
+Optional properties:
+- fsl,firmware-phandle:
+    Usage: required only if there is no fsl,qe-firmware child node
+    Value type: <phandle>
+    Definition: Points to a firmware node (see "QE Firmware Node" below)
+        that contains the firmware that should be uploaded for this QE.
+        The compatible property for the firmware node should say,
+        "fsl,qe-firmware".
+
 Recommended properties
 - brg-frequency : the internal clock source frequency for baud-rate
   generators in Hz.
@@ -59,3 +68,48 @@ Example:
 		reg = <0 c000>;
 	};
      };
+
+* QE Firmware Node
+
+This node defines a firmware binary that is embedded in the device tree, for
+the purpose of passing the firmware from bootloader to the kernel, or from
+the hypervisor to the guest.
+
+The firmware node itself contains the firmware binary contents, a compatible
+property, and any firmware-specific properties.  The node should be placed
+inside a QE node that needs it.  Doing so eliminates the need for a
+fsl,firmware-phandle property.  Other QE nodes that need the same firmware
+should define an fsl,firmware-phandle property that points to the firmware node
+in the first QE node.
+
+The fsl,firmware property can be specified in the DTS (possibly using incbin)
+or can be inserted by the boot loader at boot time.
+
+Required properties:
+  - compatible
+      Usage: required
+      Value type: <string>
+      Definition: A standard property.  Specify a string that indicates what
+          kind of firmware it is.  For QE, this should be "fsl,qe-firmware".
+
+   - fsl,firmware
+      Usage: required
+      Value type: <prop-encoded-array>, encoded as an array of bytes
+      Definition: A standard property.  This property contains the firmware
+          binary "blob".
+
+Example:
+	qe1@e0080000 {
+		compatible = "fsl,qe";
+		qe_firmware:qe-firmware {
+			compatible = "fsl,qe-firmware";
+			fsl,firmware = [0x70 0xcd 0x00 0x00 0x01 0x46 0x45 ...];
+		};
+		...
+	};
+
+	qe2@e0090000 {
+		compatible = "fsl,qe";
+		fsl,firmware-phandle = <&qe_firmware>;
+		...
+	};

+ 12 - 2
MAINTAINERS

@@ -1443,7 +1443,7 @@ F:	arch/powerpc/platforms/cell/
 
 CEPH DISTRIBUTED FILE SYSTEM CLIENT
 M:	Sage Weil <sage@newdream.net>
-L:	ceph-devel@lists.sourceforge.net
+L:	ceph-devel@vger.kernel.org
 W:	http://ceph.newdream.net/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 S:	Supported
@@ -3083,6 +3083,7 @@ F:	include/scsi/*iscsi*
 ISDN SUBSYSTEM
 M:	Karsten Keil <isdn@linux-pingi.de>
 L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)
+L:	netdev@vger.kernel.org
 W:	http://www.isdn4linux.de
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git
 S:	Maintained
@@ -3269,6 +3270,16 @@ S:	Maintained
 F:	include/linux/kexec.h
 F:	kernel/kexec.c
 
+KEYS/KEYRINGS:
+M:	David Howells <dhowells@redhat.com>
+L:	keyrings@linux-nfs.org
+S:	Maintained
+F:	Documentation/keys.txt
+F:	include/linux/key.h
+F:	include/linux/key-type.h
+F:	include/keys/
+F:	security/keys/
+
 KGDB
 M:	Jason Wessel <jason.wessel@windriver.com>
 L:	kgdb-bugreport@lists.sourceforge.net
@@ -5423,7 +5434,6 @@ S:	Maintained
 F:	sound/soc/codecs/twl4030*
 
 TIPC NETWORK LAYER
-M:	Per Liden <per.liden@ericsson.com>
 M:	Jon Maloy <jon.maloy@ericsson.com>
 M:	Allan Stephens <allan.stephens@windriver.com>
 L:	tipc-discussion@lists.sourceforge.net

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 34
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*

+ 1 - 37
arch/arm/include/asm/cacheflush.h

@@ -15,6 +15,7 @@
 #include <asm/glue.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
+#include <asm/outercache.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
@@ -219,12 +220,6 @@ struct cpu_cache_fns {
 	void (*dma_flush_range)(const void *, const void *);
 };
 
-struct outer_cache_fns {
-	void (*inv_range)(unsigned long, unsigned long);
-	void (*clean_range)(unsigned long, unsigned long);
-	void (*flush_range)(unsigned long, unsigned long);
-};
-
 /*
  * Select the calling method
  */
@@ -281,37 +276,6 @@ extern void dmac_flush_range(const void *, const void *);
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE
-
-extern struct outer_cache_fns outer_cache;
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.inv_range)
-		outer_cache.inv_range(start, end);
-}
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.clean_range)
-		outer_cache.clean_range(start, end);
-}
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{
-	if (outer_cache.flush_range)
-		outer_cache.flush_range(start, end);
-}
-
-#else
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{ }
-
-#endif
-
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user

+ 1 - 0
arch/arm/include/asm/clkdev.h

@@ -13,6 +13,7 @@
 #define __ASM_CLKDEV_H
 
 struct clk;
+struct device;
 
 struct clk_lookup {
 	struct list_head	node;

+ 1 - 0
arch/arm/include/asm/irq.h

@@ -17,6 +17,7 @@
 
 #ifndef __ASSEMBLY__
 struct irqaction;
+struct pt_regs;
 extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);

+ 75 - 0
arch/arm/include/asm/outercache.h

@@ -0,0 +1,75 @@
+/*
+ * arch/arm/include/asm/outercache.h
+ *
+ * Copyright (C) 2010 ARM Ltd.
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_OUTERCACHE_H
+#define __ASM_OUTERCACHE_H
+
+struct outer_cache_fns {
+	void (*inv_range)(unsigned long, unsigned long);
+	void (*clean_range)(unsigned long, unsigned long);
+	void (*flush_range)(unsigned long, unsigned long);
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	void (*sync)(void);
+#endif
+};
+
+#ifdef CONFIG_OUTER_CACHE
+
+extern struct outer_cache_fns outer_cache;
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.inv_range)
+		outer_cache.inv_range(start, end);
+}
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.clean_range)
+		outer_cache.clean_range(start, end);
+}
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{
+	if (outer_cache.flush_range)
+		outer_cache.flush_range(start, end);
+}
+
+#else
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{ }
+
+#endif
+
+#ifdef CONFIG_OUTER_CACHE_SYNC
+static inline void outer_sync(void)
+{
+	if (outer_cache.sync)
+		outer_cache.sync();
+}
+#else
+static inline void outer_sync(void)
+{ }
+#endif
+
+#endif	/* __ASM_OUTERCACHE_H */
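The new header only declares the hooks; whether anything sits behind them depends on CONFIG_OUTER_CACHE. As a rough illustration of how arch code is expected to pair the range operations with the new sync hook when pushing a buffer out to a device, here is a minimal sketch (the helper name is made up for this example and is not part of the patch):

	/* Hypothetical helper, assuming CONFIG_OUTER_CACHE=y */
	static void push_buffer_to_device(void *vaddr, size_t size)
	{
		unsigned long paddr = __pa(vaddr);

		/* inner-cache maintenance omitted for brevity */
		outer_clean_range(paddr, paddr + size);	/* write back L2 lines */
		outer_sync();				/* drain the L2 write buffer */
	}

If the platform has no outer cache, both calls compile away to empty inlines, so callers do not need their own #ifdefs.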

+ 10 - 6
arch/arm/include/asm/system.h

@@ -60,6 +60,8 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 
+#include <asm/outercache.h>
+
 #define __exception	__attribute__((section(".exception.text")))
 
 struct thread_info;
@@ -137,10 +139,12 @@ extern unsigned int user_debug;
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
-#define mb()		dmb()
+#ifdef CONFIG_ARCH_HAS_BARRIERS
+#include <mach/barriers.h>
+#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb()		do { dsb(); outer_sync(); } while (0)
 #define rmb()		dmb()
-#define wmb()		dmb()
+#define wmb()		mb()
 #else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@@ -152,9 +156,9 @@ extern unsigned int user_debug;
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
+#define smp_mb()	dmb()
+#define smp_rmb()	dmb()
+#define smp_wmb()	dmb()
 #endif
 
 #define read_barrier_depends()		do { } while(0)

+ 9 - 1
arch/arm/kernel/kprobes.c

@@ -393,6 +393,14 @@ void __kprobes jprobe_return(void)
 		/*
 		 * Setup an empty pt_regs. Fill SP and PC fields as
 		 * they're needed by longjmp_break_handler.
+		 *
+		 * We allocate some slack between the original SP and start of
+		 * our fabricated regs. To be precise we want to have worst case
+		 * covered which is STMFD with all 16 regs so we allocate 2 *
+		 * sizeof(struct_pt_regs)).
+		 *
+		 * This is to prevent any simulated instruction from writing
+		 * over the regs when they are accessing the stack.
 		 */
 		"sub    sp, %0, %1		\n\t"
 		"ldr    r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
@@ -410,7 +418,7 @@ void __kprobes jprobe_return(void)
 		"ldmia	sp, {r0 - pc}		\n\t"
 		:
 		: "r" (kcb->jprobe_saved_regs.ARM_sp),
-		  "I" (sizeof(struct pt_regs)),
+		  "I" (sizeof(struct pt_regs) * 2),
 		  "J" (offsetof(struct pt_regs, ARM_sp)),
 		  "J" (offsetof(struct pt_regs, ARM_pc)),
 		  "J" (offsetof(struct pt_regs, ARM_cpsr))

+ 2 - 2
arch/arm/lib/memmove.S

@@ -74,7 +74,7 @@ ENTRY(memmove)
 		rsb	ip, ip, #32
 		addne	pc, pc, ip		@ C is always clear here
 		b	7f
-6:		nop
+6:		W(nop)
 		W(ldr)	r3, [r1, #-4]!
 		W(ldr)	r4, [r1, #-4]!
 		W(ldr)	r5, [r1, #-4]!
@@ -85,7 +85,7 @@ ENTRY(memmove)
 
 		add	pc, pc, ip
 		nop
-		nop
+		W(nop)
 		W(str)	r3, [r0, #-4]!
 		W(str)	r4, [r0, #-4]!
 		W(str)	r5, [r0, #-4]!

+ 13 - 0
arch/arm/mm/Kconfig

@@ -736,6 +736,12 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 config OUTER_CACHE
 	bool
 
+config OUTER_CACHE_SYNC
+	bool
+	help
+	  The outer cache has a outer_cache_fns.sync function pointer
+	  that can be used to drain the write buffer of the outer cache.
+
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
 	depends on ARCH_KIRKWOOD || ARCH_MV78XX0
@@ -757,6 +763,7 @@ config CACHE_L2X0
 		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
 	default y
 	select OUTER_CACHE
+	select OUTER_CACHE_SYNC
 	help
 	  This option enables the L2x0 PrimeCell.
 
@@ -781,3 +788,9 @@ config ARM_L1_CACHE_SHIFT
 	int
 	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
+
+config ARCH_HAS_BARRIERS
+	bool
+	help
+	  This option allows the use of custom mandatory barriers
+	  included via the mach/barriers.h file.

+ 10 - 0
arch/arm/mm/cache-l2x0.c

@@ -93,6 +93,15 @@ static inline void l2x0_flush_line(unsigned long addr)
 }
 #endif
 
+static void l2x0_cache_sync(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 static inline void l2x0_inv_all(void)
 {
 	unsigned long flags;
@@ -225,6 +234,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.inv_range = l2x0_inv_range;
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
+	outer_cache.sync = l2x0_cache_sync;
 
 	printk(KERN_INFO "L2X0 cache controller enabled\n");
 }

+ 1 - 1
arch/arm/vfp/vfpmodule.c

@@ -545,7 +545,7 @@ static int __init vfp_init(void)
 		 */
 		elf_hwcap |= HWCAP_VFP;
 #ifdef CONFIG_VFPv3
-		if (VFP_arch >= 3) {
+		if (VFP_arch >= 2) {
 			elf_hwcap |= HWCAP_VFPv3;
 
 			/*

+ 1 - 1
arch/cris/arch-v32/drivers/pci/bios.c

@@ -50,7 +50,7 @@ pcibios_align_resource(void *data, const struct resource *res,
 	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
 		start = (start + 0x3ff) & ~0x3ff;
 
-	return start
+	return start;
 }
 
 int pcibios_enable_resources(struct pci_dev *dev, int mask)

+ 2 - 4
arch/frv/mb93090-mb00/pci-frv.c

@@ -41,7 +41,7 @@ pcibios_align_resource(void *data, const struct resource *res,
 	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
 		start = (start + 0x3ff) & ~0x3ff;
 
-	return start
+	return start;
 }
 
 
@@ -94,8 +94,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 				r = &dev->resource[idx];
 				if (!r->start)
 					continue;
-				if (pci_claim_resource(dev, idx) < 0)
-					printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
+				pci_claim_resource(dev, idx);
 			}
 		}
 		pcibios_allocate_bus_resources(&bus->children);
@@ -125,7 +124,6 @@ static void __init pcibios_allocate_resources(int pass)
 				DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
 				    r->start, r->end, r->flags, disabled, pass);
 				if (pci_claim_resource(dev, idx) < 0) {
-					printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
 					/* We'll assign a new address later */
 					r->end -= r->start;
 					r->start = 0;

+ 0 - 3
arch/microblaze/Kconfig

@@ -75,9 +75,6 @@ config LOCKDEP_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y
 
-config PCI
-	def_bool n
-
 config DTC
 	def_bool y
 

+ 3 - 1
arch/microblaze/Makefile

@@ -84,7 +84,7 @@ define archhelp
   echo '* linux.bin    - Create raw binary'
   echo '  linux.bin.gz - Create compressed raw binary'
   echo '  simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
-  echo '                   - stripped elf with fdt blob
+  echo '                   - stripped elf with fdt blob'
   echo '  simpleImage.<dt>.unstrip - full ELF image with fdt blob'
   echo '  *_defconfig      - Select default config from arch/microblaze/configs'
   echo ''
@@ -94,3 +94,5 @@ define archhelp
   echo '  name of a dts file from the arch/microblaze/boot/dts/ directory'
   echo '  (minus the .dts extension).'
 endef
+
+MRPROPER_FILES += $(boot)/simpleImage.*

+ 1 - 5
arch/microblaze/boot/Makefile

@@ -23,8 +23,6 @@ $(obj)/system.dtb: $(obj)/$(DTB).dtb
 endif
 
 $(obj)/linux.bin: vmlinux FORCE
-	[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
-	touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
 	$(call if_changed,objcopy)
 	$(call if_changed,uimage)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -62,6 +60,4 @@ quiet_cmd_dtc = DTC     $@
 $(obj)/%.dtb: $(dtstree)/%.dts FORCE
 	$(call if_changed,dtc)
 
-clean-kernel += linux.bin linux.bin.gz simpleImage.*
-
-clean-files += *.dtb simpleImage.*.unstrip
+clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub

+ 0 - 1
arch/microblaze/include/asm/processor.h

@@ -14,7 +14,6 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 #include <asm/registers.h>
-#include <asm/segment.h>
 #include <asm/entry.h>
 #include <asm/current.h>
 

+ 0 - 49
arch/microblaze/include/asm/segment.h

@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2008-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SEGMENT_H
-#define _ASM_MICROBLAZE_SEGMENT_H
-
-# ifndef __ASSEMBLY__
-
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
-/*
- * On Microblaze the fs value is actually the top of the corresponding
- * address space.
- *
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
- */
-# define MAKE_MM_SEG(s)       ((mm_segment_t) { (s) })
-
-#  ifndef CONFIG_MMU
-#  define KERNEL_DS	MAKE_MM_SEG(0)
-#  define USER_DS	KERNEL_DS
-#  else
-#  define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
-#  define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
-#  endif
-
-# define get_ds()	(KERNEL_DS)
-# define get_fs()	(current_thread_info()->addr_limit)
-# define set_fs(val)	(current_thread_info()->addr_limit = (val))
-
-# define segment_eq(a, b)	((a).seg == (b).seg)
-
-# endif /* __ASSEMBLY__ */
-#endif /* _ASM_MICROBLAZE_SEGMENT_H */

+ 4 - 1
arch/microblaze/include/asm/thread_info.h

@@ -19,7 +19,6 @@
 #ifndef __ASSEMBLY__
 # include <linux/types.h>
 # include <asm/processor.h>
-# include <asm/segment.h>
 
 /*
  * low level task data that entry.S needs immediate access to
@@ -60,6 +59,10 @@ struct cpu_context {
 	__u32	fsr;
 };
 
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
 struct thread_info {
 	struct task_struct	*task; /* main task structure */
 	struct exec_domain	*exec_domain; /* execution domain */

+ 2 - 1
arch/microblaze/include/asm/tlbflush.h

@@ -24,6 +24,7 @@ extern void _tlbie(unsigned long address);
 extern void _tlbia(void);
 
 #define __tlbia()	{ preempt_disable(); _tlbia(); preempt_enable(); }
+#define __tlbie(x)	{ _tlbie(x); }
 
 static inline void local_flush_tlb_all(void)
 	{ __tlbia(); }
@@ -31,7 +32,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	{ __tlbia(); }
 static inline void local_flush_tlb_page(struct vm_area_struct *vma,
 				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
+	{ __tlbie(vmaddr); }
 static inline void local_flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 	{ __tlbia(); }

+ 244 - 203
arch/microblaze/include/asm/uaccess.h

@@ -22,101 +22,73 @@
 #include <asm/mmu.h>
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/pgtable.h>
-#include <asm/segment.h>
 #include <linux/string.h>
 #include <linux/string.h>
 
 
 #define VERIFY_READ	0
 #define VERIFY_READ	0
 #define VERIFY_WRITE	1
 #define VERIFY_WRITE	1
 
 
-#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)
-
-#ifndef CONFIG_MMU
-
-extern int ___range_ok(unsigned long addr, unsigned long size);
-
-#define __range_ok(addr, size) \
-		___range_ok((unsigned long)(addr), (unsigned long)(size))
-
-#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
-#define __access_ok(add, size) (__range_ok((addr), (size)) == 0)
-
-/* Undefined function to trigger linker error */
-extern int bad_user_access_length(void);
-
-/* FIXME this is function for optimalization -> memcpy */
-#define __get_user(var, ptr)				\
-({							\
-	int __gu_err = 0;				\
-	switch (sizeof(*(ptr))) {			\
-	case 1:						\
-	case 2:						\
-	case 4:						\
-		(var) = *(ptr);				\
-		break;					\
-	case 8:						\
-		memcpy((void *) &(var), (ptr), 8);	\
-		break;					\
-	default:					\
-		(var) = 0;				\
-		__gu_err = __get_user_bad();		\
-		break;					\
-	}						\
-	__gu_err;					\
-})
+/*
+ * On Microblaze the fs value is actually the top of the corresponding
+ * address space.
+ *
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
+ */
+# define MAKE_MM_SEG(s)       ((mm_segment_t) { (s) })
 
 
-#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
+#  ifndef CONFIG_MMU
+#  define KERNEL_DS	MAKE_MM_SEG(0)
+#  define USER_DS	KERNEL_DS
+#  else
+#  define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
+#  define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
+#  endif
 
 
-/* FIXME is not there defined __pu_val */
-#define __put_user(var, ptr)					\
-({								\
-	int __pu_err = 0;					\
-	switch (sizeof(*(ptr))) {				\
-	case 1:							\
-	case 2:							\
-	case 4:							\
-		*(ptr) = (var);					\
-		break;						\
-	case 8: {						\
-		typeof(*(ptr)) __pu_val = (var);		\
-		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
-		}						\
-		break;						\
-	default:						\
-		__pu_err = __put_user_bad();			\
-		break;						\
-	}							\
-	__pu_err;						\
-})
+# define get_ds()	(KERNEL_DS)
+# define get_fs()	(current_thread_info()->addr_limit)
+# define set_fs(val)	(current_thread_info()->addr_limit = (val))
 
 
-#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))
+# define segment_eq(a, b)	((a).seg == (b).seg)
 
 
-#define put_user(x, ptr)	__put_user((x), (ptr))
-#define get_user(x, ptr)	__get_user((x), (ptr))
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry {
+	unsigned long insn, fixup;
+};
 
 
-#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
-#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)
+/* Returns 0 if exception not found and fixup otherwise.  */
+extern unsigned long search_exception_table(unsigned long);
 
 
-#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
-#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
-#define __copy_to_user_inatomic(to, from, n) \
-			(__copy_to_user((to), (from), (n)))
-#define __copy_from_user_inatomic(to, from, n) \
-			(__copy_from_user((to), (from), (n)))
+#ifndef CONFIG_MMU
 
 
-static inline unsigned long clear_user(void *addr, unsigned long size)
+/* Check against bounds of physical memory */
+static inline int ___range_ok(unsigned long addr, unsigned long size)
 {
 {
-	if (access_ok(VERIFY_WRITE, addr, size))
-		size = __clear_user(addr, size);
-	return size;
+	return ((addr < memory_start) ||
+		((addr + size) > memory_end));
 }
 }
 
 
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
+#define __range_ok(addr, size) \
+		___range_ok((unsigned long)(addr), (unsigned long)(size))
 
 
-extern long strncpy_from_user(char *dst, const char *src, long count);
-extern long strnlen_user(const char *src, long count);
+#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
 
 
-#else /* CONFIG_MMU */
+#else
 
 
 /*
 /*
  * Address is valid if:
  * Address is valid if:
@@ -129,24 +101,88 @@ extern long strnlen_user(const char *src, long count);
 /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
 /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
  type?"WRITE":"READ",addr,size,get_fs().seg)) */
  type?"WRITE":"READ",addr,size,get_fs().seg)) */
 
 
-/*
- * All the __XXX versions macros/functions below do not perform
- * access checking. It is assumed that the necessary checks have been
- * already performed before the finction (macro) is called.
- */
+#endif
 
 
-#define get_user(x, ptr)						\
-({									\
-	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))			\
-		? __get_user((x), (ptr)) : -EFAULT;			\
-})
+#ifdef CONFIG_MMU
+# define __FIXUP_SECTION	".section .fixup,\"ax\"\n"
+# define __EX_TABLE_SECTION	".section __ex_table,\"a\"\n"
+#else
+# define __FIXUP_SECTION	".section .discard,\"ax\"\n"
+# define __EX_TABLE_SECTION	".section .discard,\"a\"\n"
+#endif
 
 
-#define put_user(x, ptr)						\
-({									\
-	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))			\
-		? __put_user((x), (ptr)) : -EFAULT;			\
+extern unsigned long __copy_tofrom_user(void __user *to,
+		const void __user *from, unsigned long size);
+
+/* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */
+static inline unsigned long __must_check __clear_user(void __user *to,
+							unsigned long n)
+{
+	/* normal memset with two words to __ex_table */
+	__asm__ __volatile__ (				\
+			"1:	sb	r0, %2, r0;"	\
+			"	addik	%0, %0, -1;"	\
+			"	bneid	%0, 1b;"	\
+			"	addik	%2, %2, 1;"	\
+			"2:			"	\
+			__EX_TABLE_SECTION		\
+			".word	1b,2b;"			\
+			".previous;"			\
+		: "=r"(n)				\
+		: "0"(n), "r"(to)
+	);
+	return n;
+}
+
+static inline unsigned long __must_check clear_user(void __user *to,
+							unsigned long n)
+{
+	might_sleep();
+	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+		return n;
+
+	return __clear_user(to, n);
+}
+
+/* put_user and get_user macros */
+extern long __user_bad(void);
+
+#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
+({								\
+	__asm__ __volatile__ (					\
+			"1:"	insn	" %1, %2, r0;"		\
+			"	addk	%0, r0, r0;"		\
+			"2:			"		\
+			__FIXUP_SECTION				\
+			"3:	brid	2b;"			\
+			"	addik	%0, r0, %3;"		\
+			".previous;"				\
+			__EX_TABLE_SECTION			\
+			".word	1b,3b;"				\
+			".previous;"				\
+		: "=&r"(__gu_err), "=r"(__gu_val)		\
+		: "r"(__gu_ptr), "i"(-EFAULT)			\
+	);							\
 })
 })
 
 
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+
 #define __get_user(x, ptr)						\
 #define __get_user(x, ptr)						\
 ({									\
 ({									\
 	unsigned long __gu_val;						\
 	unsigned long __gu_val;						\
@@ -163,30 +199,74 @@ extern long strnlen_user(const char *src, long count);
 		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
 		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
 		break;							\
 		break;							\
 	default:							\
 	default:							\
-		__gu_val = 0; __gu_err = -EINVAL;			\
+		/* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
 	}								\
 	}								\
 	x = (__typeof__(*(ptr))) __gu_val;				\
 	x = (__typeof__(*(ptr))) __gu_val;				\
 	__gu_err;							\
 	__gu_err;							\
 })
 })
 
 
-#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)		\
+
+#define get_user(x, ptr)						\
 ({									\
 ({									\
-	__asm__ __volatile__ (						\
-			"1:"	insn	" %1, %2, r0;			\
-				addk	%0, r0, r0;			\
-			2:						\
-			.section .fixup,\"ax\";				\
-			3:	brid	2b;				\
-				addik	%0, r0, %3;			\
-			.previous;					\
-			.section __ex_table,\"a\";			\
-			.word	1b,3b;					\
-			.previous;"					\
-		: "=r"(__gu_err), "=r"(__gu_val)			\
-		: "r"(__gu_ptr), "i"(-EFAULT)				\
-	);								\
+	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))			\
+		? __get_user((x), (ptr)) : -EFAULT;			\
+})
+
+#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
+({								\
+	__asm__ __volatile__ (					\
+			"1:"	insn	" %1, %2, r0;"		\
+			"	addk	%0, r0, r0;"		\
+			"2:			"		\
+			__FIXUP_SECTION				\
+			"3:	brid	2b;"			\
+			"	addik	%0, r0, %3;"		\
+			".previous;"				\
+			__EX_TABLE_SECTION			\
+			".word	1b,3b;"				\
+			".previous;"				\
+		: "=&r"(__gu_err)				\
+		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
+	);							\
 })
 })
 
 
+#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)		\
+({								\
+	__asm__ __volatile__ ("	lwi	%0, %1, 0;"		\
+			"1:	swi	%0, %2, 0;"		\
+			"	lwi	%0, %1, 4;"		\
+			"2:	swi	%0, %2, 4;"		\
+			"	addk	%0, r0, r0;"		\
+			"3:			"		\
+			__FIXUP_SECTION				\
+			"4:	brid	3b;"			\
+			"	addik	%0, r0, %3;"		\
+			".previous;"				\
+			__EX_TABLE_SECTION			\
+			".word	1b,4b,2b,4b;"			\
+			".previous;"				\
+		: "=&r"(__gu_err)				\
+		: "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
+		);						\
+})
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space.  It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+
 #define __put_user(x, ptr)						\
 #define __put_user(x, ptr)						\
 ({									\
 ({									\
 	__typeof__(*(ptr)) volatile __gu_val = (x);			\
 	__typeof__(*(ptr)) volatile __gu_val = (x);			\
@@ -195,7 +275,7 @@ extern long strnlen_user(const char *src, long count);
 	case 1:								\
 	case 1:								\
 		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
 		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
 		break;							\
 		break;							\
-	case 2: 							\
+	case 2:								\
 		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
 		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
 		break;							\
 		break;							\
 	case 4:								\
 	case 4:								\
@@ -205,121 +285,82 @@ extern long strnlen_user(const char *src, long count);
 		__put_user_asm_8((ptr), __gu_val, __gu_err);		\
 		__put_user_asm_8((ptr), __gu_val, __gu_err);		\
 		break;							\
 		break;							\
 	default:							\
 	default:							\
-		__gu_err = -EINVAL;					\
+		/*__gu_err = -EINVAL;*/	__gu_err = __user_bad();	\
 	}								\
 	}								\
 	__gu_err;							\
 	__gu_err;							\
 })
 })
 
 
-#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
-({							\
-__asm__ __volatile__ ("	lwi	%0, %1, 0;		\
-		1:	swi	%0, %2, 0;		\
-			lwi	%0, %1, 4;		\
-		2:	swi	%0, %2, 4;		\
-			addk	%0,r0,r0;		\
-		3:					\
-		.section .fixup,\"ax\";			\
-		4:	brid	3b;			\
-			addik	%0, r0, %3;		\
-		.previous;				\
-		.section __ex_table,\"a\";		\
-		.word	1b,4b,2b,4b;			\
-		.previous;"				\
-	: "=&r"(__gu_err)				\
-	: "r"(&__gu_val),				\
-	"r"(__gu_ptr), "i"(-EFAULT)			\
-	);						\
-})
+#ifndef CONFIG_MMU
 
 
-#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
-({								\
-	__asm__ __volatile__ (					\
-			"1:"	insn	" %1, %2, r0;		\
-				addk	%0, r0, r0;		\
-			2:					\
-			.section .fixup,\"ax\";			\
-			3:	brid	2b;			\
-				addik	%0, r0, %3;		\
-			.previous;				\
-			.section __ex_table,\"a\";		\
-			.word	1b,3b;				\
-			.previous;"				\
-		: "=r"(__gu_err)				\
-		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
-	);							\
-})
+#define put_user(x, ptr)	__put_user((x), (ptr))
 
 
-/*
- * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
- */
-static inline int clear_user(char *to, int size)
-{
-	if (size && access_ok(VERIFY_WRITE, to, size)) {
-		__asm__ __volatile__ ("				\
-				1:				\
-					sb	r0, %2, r0;	\
-					addik	%0, %0, -1;	\
-					bneid	%0, 1b;		\
-					addik	%2, %2, 1;	\
-				2:				\
-				.section __ex_table,\"a\";	\
-				.word	1b,2b;			\
-				.section .text;"		\
-			: "=r"(size)				\
-			: "0"(size), "r"(to)
-		);
-	}
-	return size;
-}
+#else /* CONFIG_MMU */
 
 
-#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
+#define put_user(x, ptr)						\
+({									\
+	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))			\
+		? __put_user((x), (ptr)) : -EFAULT;			\
+})
+#endif /* CONFIG_MMU */
+
+/* copy_to_from_user */
+#define __copy_from_user(to, from, n)	\
+	__copy_tofrom_user((__force void __user *)(to), \
+				(void __user *)(from), (n))
 #define __copy_from_user_inatomic(to, from, n) \
 #define __copy_from_user_inatomic(to, from, n) \
 		copy_from_user((to), (from), (n))
 		copy_from_user((to), (from), (n))
 
 
-#define copy_to_user(to, from, n)					\
-	(access_ok(VERIFY_WRITE, (to), (n)) ?				\
-		__copy_tofrom_user((void __user *)(to),			\
-			(__force const void __user *)(from), (n))	\
-		: -EFAULT)
+static inline long copy_from_user(void *to,
+		const void __user *from, unsigned long n)
+{
+	might_sleep();
+	if (access_ok(VERIFY_READ, from, n))
+		return __copy_from_user(to, from, n);
+	return n;
+}
 
 
-#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
+#define __copy_to_user(to, from, n)	\
+		__copy_tofrom_user((void __user *)(to), \
+			(__force const void __user *)(from), (n))
 #define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))
 #define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))
 
 
-#define copy_from_user(to, from, n)					\
-	(access_ok(VERIFY_READ, (from), (n)) ?				\
-		__copy_tofrom_user((__force void __user *)(to),		\
-			(void __user *)(from), (n))			\
-		: -EFAULT)
+static inline long copy_to_user(void __user *to,
+		const void *from, unsigned long n)
+{
+	might_sleep();
+	if (access_ok(VERIFY_WRITE, to, n))
+		return __copy_to_user(to, from, n);
+	return n;
+}
 
 
+/*
+ * Copy a null terminated string from userspace.
+ */
 extern int __strncpy_user(char *to, const char __user *from, int len);
 extern int __strncpy_user(char *to, const char __user *from, int len);
-extern int __strnlen_user(const char __user *sstr, int len);
 
 
-#define strncpy_from_user(to, from, len)	\
-		(access_ok(VERIFY_READ, from, 1) ?	\
-			__strncpy_user(to, from, len) : -EFAULT)
-#define strnlen_user(str, len)	\
-		(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
+#define __strncpy_from_user	__strncpy_user
 
 
-#endif /* CONFIG_MMU */
-
-extern unsigned long __copy_tofrom_user(void __user *to,
-		const void __user *from, unsigned long size);
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+	return __strncpy_from_user(dst, src, count);
+}
 
 
 /*
 /*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * Return the size of a string (including the ending 0)
  *
  *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
+ * Return 0 on exception, a value greater than N if too long
  */
  */
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
+extern int __strnlen_user(const char __user *sstr, int len);
+
+static inline long strnlen_user(const char __user *src, long n)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return 0;
+	return __strnlen_user(src, n);
+}
 
 
 #endif  /* __ASSEMBLY__ */
 #endif  /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __KERNEL__ */

+ 1 - 1
arch/microblaze/kernel/dma.c

@@ -37,7 +37,7 @@ static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
 
 static unsigned long get_dma_direct_offset(struct device *dev)
 {
-	if (dev)
+	if (likely(dev))
 		return (unsigned long)dev->archdata.dma_data;
 
 	return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */

+ 9 - 3
arch/microblaze/kernel/head.S

@@ -51,6 +51,12 @@ swapper_pg_dir:
 
 	.text
 ENTRY(_start)
+#if CONFIG_KERNEL_BASE_ADDR == 0
+	brai	TOPHYS(real_start)
+	.org	0x100
+real_start:
+#endif
+
 	mfs	r1, rmsr
 	andi	r1, r1, ~2
 	mts	rmsr, r1
@@ -99,8 +105,8 @@ no_fdt_arg:
 	tophys(r4,r4)			/* convert to phys address */
 	ori	r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
-	lbu	r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
-	sb	r2, r4, r6		/* addr[r4+r6]= r7*/
+	lbu	r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */
+	sb	r2, r4, r6		/* addr[r4+r6]= r2*/
 	addik	r6, r6, 1		/* increment counting */
 	bgtid	r3, _copy_command_line	/* loop for all entries       */
 	addik	r3, r3, -1		/* descrement loop */
@@ -128,7 +134,7 @@ _copy_bram:
 	 * virtual to physical.
 	 */
 	nop
-	addik	r3, r0, 63		/* Invalidate all TLB entries */
+	addik	r3, r0, MICROBLAZE_TLB_SIZE -1	/* Invalidate all TLB entries */
 _invalidate:
 	mts	rtlbx, r3
 	mts	rtlbhi, r0			/* flush: ensure V is clear   */

+ 48 - 64
arch/microblaze/kernel/hw_exception_handler.S

@@ -313,13 +313,13 @@ _hw_exception_handler:
 	mfs	r5, rmsr;
 	mfs	r5, rmsr;
 	nop
 	nop
 	swi	r5, r1, 0;
 	swi	r5, r1, 0;
-	mfs	r3, resr
+	mfs	r4, resr
 	nop
 	nop
-	mfs	r4, rear;
+	mfs	r3, rear;
 	nop
 	nop
 
 
 #ifndef CONFIG_MMU
 #ifndef CONFIG_MMU
-	andi	r5, r3, 0x1000;		/* Check ESR[DS] */
+	andi	r5, r4, 0x1000;		/* Check ESR[DS] */
 	beqi	r5, not_in_delay_slot;	/* Branch if ESR[DS] not set */
 	beqi	r5, not_in_delay_slot;	/* Branch if ESR[DS] not set */
 	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
 	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
 	nop
 	nop
@@ -327,13 +327,14 @@ not_in_delay_slot:
 	swi	r17, r1, PT_R17
 	swi	r17, r1, PT_R17
 #endif
 #endif
 
 
-	andi	r5, r3, 0x1F;		/* Extract ESR[EXC] */
+	andi	r5, r4, 0x1F;		/* Extract ESR[EXC] */
 
 
 #ifdef CONFIG_MMU
 #ifdef CONFIG_MMU
 	/* Calculate exception vector offset = r5 << 2 */
 	/* Calculate exception vector offset = r5 << 2 */
 	addk	r6, r5, r5; /* << 1 */
 	addk	r6, r5, r5; /* << 1 */
 	addk	r6, r6, r6; /* << 2 */
 	addk	r6, r6, r6; /* << 2 */
 
 
+#ifdef DEBUG
 /* counting which exception happen */
 /* counting which exception happen */
 	lwi	r5, r0, 0x200 + TOPHYS(r0_ram)
 	lwi	r5, r0, 0x200 + TOPHYS(r0_ram)
 	addi	r5, r5, 1
 	addi	r5, r5, 1
@@ -341,6 +342,7 @@ not_in_delay_slot:
 	lwi	r5, r6, 0x200 + TOPHYS(r0_ram)
 	lwi	r5, r6, 0x200 + TOPHYS(r0_ram)
 	addi	r5, r5, 1
 	addi	r5, r5, 1
 	swi	r5, r6, 0x200 + TOPHYS(r0_ram)
 	swi	r5, r6, 0x200 + TOPHYS(r0_ram)
+#endif
 /* end */
 /* end */
 	/* Load the HW Exception vector */
 	/* Load the HW Exception vector */
 	lwi	r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
 	lwi	r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
 	swi	r18, r1, PT_R18
 	swi	r18, r1, PT_R18
 
 
 	or	r5, r1, r0
 	or	r5, r1, r0
-	andi	r6, r3, 0x1F; /* Load ESR[EC] */
+	andi	r6, r4, 0x1F; /* Load ESR[EC] */
 	lwi	r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
 	lwi	r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
 	swi	r7, r1, PT_MODE
 	swi	r7, r1, PT_MODE
 	mfs	r7, rfsr
 	mfs	r7, rfsr
@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
  */
  */
 handle_unaligned_ex:
 handle_unaligned_ex:
 	/* Working registers already saved: R3, R4, R5, R6
 	/* Working registers already saved: R3, R4, R5, R6
-	 *  R3 = ESR
-	 *  R4 = EAR
+	 *  R4 = ESR
+	 *  R3 = EAR
 	 */
 	 */
 #ifdef CONFIG_MMU
 #ifdef CONFIG_MMU
-	andi	r6, r3, 0x1000			/* Check ESR[DS] */
+	andi	r6, r4, 0x1000			/* Check ESR[DS] */
 	beqi	r6, _no_delayslot		/* Branch if ESR[DS] not set */
 	beqi	r6, _no_delayslot		/* Branch if ESR[DS] not set */
 	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
 	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
 	nop
 	nop
@@ -439,7 +441,7 @@ _no_delayslot:
 	RESTORE_STATE;
 	RESTORE_STATE;
 	bri	unaligned_data_trap
 	bri	unaligned_data_trap
 #endif
 #endif
-	andi	r6, r3, 0x3E0; /* Mask and extract the register operand */
+	andi	r6, r4, 0x3E0; /* Mask and extract the register operand */
 	srl	r6, r6; /* r6 >> 5 */
 	srl	r6, r6; /* r6 >> 5 */
 	srl	r6, r6;
 	srl	r6, r6;
 	srl	r6, r6;
 	srl	r6, r6;
@@ -448,33 +450,33 @@ _no_delayslot:
 	/* Store the register operand in a temporary location */
 	/* Store the register operand in a temporary location */
 	sbi	r6, r0, TOPHYS(ex_reg_op);
 	sbi	r6, r0, TOPHYS(ex_reg_op);
 
 
-	andi	r6, r3, 0x400; /* Extract ESR[S] */
+	andi	r6, r4, 0x400; /* Extract ESR[S] */
 	bnei	r6, ex_sw;
 	bnei	r6, ex_sw;
 ex_lw:
 ex_lw:
-	andi	r6, r3, 0x800; /* Extract ESR[W] */
+	andi	r6, r4, 0x800; /* Extract ESR[W] */
 	beqi	r6, ex_lhw;
 	beqi	r6, ex_lhw;
-	lbui	r5, r4, 0; /* Exception address in r4 */
+	lbui	r5, r3, 0; /* Exception address in r3 */
 	/* Load a word, byte-by-byte from destination address
 	/* Load a word, byte-by-byte from destination address
 		and save it in tmp space */
 		and save it in tmp space */
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
-	lbui	r5, r4, 1;
+	lbui	r5, r3, 1;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
-	lbui	r5, r4, 2;
+	lbui	r5, r3, 2;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_2);
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_2);
-	lbui	r5, r4, 3;
+	lbui	r5, r3, 3;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_3);
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_3);
-	/* Get the destination register value into r3 */
-	lwi	r3, r0, TOPHYS(ex_tmp_data_loc_0);
+	/* Get the destination register value into r4 */
+	lwi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
 	bri	ex_lw_tail;
 	bri	ex_lw_tail;
 ex_lhw:
 ex_lhw:
-	lbui	r5, r4, 0; /* Exception address in r4 */
+	lbui	r5, r3, 0; /* Exception address in r3 */
 	/* Load a half-word, byte-by-byte from destination
 	/* Load a half-word, byte-by-byte from destination
 		address and save it in tmp space */
 		address and save it in tmp space */
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
-	lbui	r5, r4, 1;
+	lbui	r5, r3, 1;
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
 	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
-	/* Get the destination register value into r3 */
-	lhui	r3, r0, TOPHYS(ex_tmp_data_loc_0);
+	/* Get the destination register value into r4 */
+	lhui	r4, r0, TOPHYS(ex_tmp_data_loc_0);
 ex_lw_tail:
 ex_lw_tail:
 	/* Get the destination register number into r5 */
 	/* Get the destination register number into r5 */
 	lbui	r5, r0, TOPHYS(ex_reg_op);
 	lbui	r5, r0, TOPHYS(ex_reg_op);
@@ -502,25 +504,25 @@ ex_sw_tail:
 	andi	r6, r6, 0x800; /* Extract ESR[W] */
 	andi	r6, r6, 0x800; /* Extract ESR[W] */
 	beqi	r6, ex_shw;
 	beqi	r6, ex_shw;
 	/* Get the word - delay slot */
 	/* Get the word - delay slot */
-	swi	r3, r0, TOPHYS(ex_tmp_data_loc_0);
+	swi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
 	/* Store the word, byte-by-byte into destination address */
 	/* Store the word, byte-by-byte into destination address */
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_0);
-	sbi	r3, r4, 0;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_1);
-	sbi	r3, r4, 1;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_2);
-	sbi	r3, r4, 2;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_3);
-	sbi	r3, r4, 3;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_0);
+	sbi	r4, r3, 0;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_1);
+	sbi	r4, r3, 1;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_2);
+	sbi	r4, r3, 2;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_3);
+	sbi	r4, r3, 3;
 	bri	ex_handler_done;
 
 ex_shw:
 	/* Store the lower half-word, byte-by-byte into destination address */
-	swi	r3, r0, TOPHYS(ex_tmp_data_loc_0);
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_2);
-	sbi	r3, r4, 0;
-	lbui	r3, r0, TOPHYS(ex_tmp_data_loc_3);
-	sbi	r3, r4, 1;
+	swi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_2);
+	sbi	r4, r3, 0;
+	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_3);
+	sbi	r4, r3, 1;
 ex_sw_end: /* Exception handling of store word, ends. */
 
 ex_handler_done:
@@ -560,21 +562,16 @@ ex_handler_done:
 		 */
 		mfs	r11, rpid
 		nop
-		bri	4
-		mfs	r3, rear		/* Get faulting address */
-		nop
 		/* If we are faulting a kernel address, we have to use the
 		 * kernel page tables.
 		 */
-		ori	r4, r0, CONFIG_KERNEL_START
-		cmpu	r4, r3, r4
-		bgti	r4, ex3
+		ori	r5, r0, CONFIG_KERNEL_START
+		cmpu	r5, r3, r5
+		bgti	r5, ex3
 		/* First, check if it was a zone fault (which means a user
 		 * tried to access a kernel or read-protected page - always
 		 * a SEGV). All other faults here must be stores, so no
 		 * need to check ESR_S as well. */
-		mfs	r4, resr
-		nop
 		andi	r4, r4, 0x800		/* ESR_Z - zone protection */
 		bnei	r4, ex2
 
@@ -589,8 +586,6 @@ ex_handler_done:
 		 * tried to access a kernel or read-protected page - always
 		 * a SEGV). All other faults here must be stores, so no
 		 * need to check ESR_S as well. */
-		mfs	r4, resr
-		nop
 		andi	r4, r4, 0x800		/* ESR_Z */
 		bnei	r4, ex2
 		/* get current task address */
@@ -665,8 +660,6 @@ ex_handler_done:
 		 * R3 = ESR
 		 */
 
-		mfs	r3, rear		/* Get faulting address */
-		nop
 		RESTORE_STATE;
 		bri	page_fault_instr_trap
 
@@ -677,18 +670,15 @@ ex_handler_done:
 	 */
 	handle_data_tlb_miss_exception:
 		/* Working registers already saved: R3, R4, R5, R6
-		 * R3 = ESR
+		 * R3 = EAR, R4 = ESR
 		 */
 		mfs	r11, rpid
 		nop
-		bri	4
-		mfs	r3, rear		/* Get faulting address */
-		nop
 
 		/* If we are faulting a kernel address, we have to use the
 		 * kernel page tables. */
-		ori	r4, r0, CONFIG_KERNEL_START
-		cmpu	r4, r3, r4
+		ori	r6, r0, CONFIG_KERNEL_START
+		cmpu	r4, r3, r6
 		bgti	r4, ex5
 		ori	r4, r0, swapper_pg_dir
 		mts	rpid, r0		/* TLB will have 0 TID */
@@ -731,9 +721,8 @@ ex_handler_done:
 		 * Many of these bits are software only. Bits we don't set
 		 * here we (properly should) assume have the appropriate value.
 		 */
+		brid	finish_tlb_load
 		andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
-
-		bri	finish_tlb_load
 	ex7:
 		/* The bailout. Restore registers to pre-exception conditions
 		 * and call the heavyweights to help us out.
@@ -754,9 +743,6 @@ ex_handler_done:
 		 */
 		mfs	r11, rpid
 		nop
-		bri	4
-		mfs	r3, rear		/* Get faulting address */
-		nop
 
 		/* If we are faulting a kernel address, we have to use the
 		 * kernel page tables.
@@ -792,7 +778,7 @@ ex_handler_done:
 		lwi	r4, r5, 0		/* Get Linux PTE */
 
 		andi	r6, r4, _PAGE_PRESENT
-		beqi	r6, ex7
+		beqi	r6, ex10
 
 		ori	r4, r4, _PAGE_ACCESSED
 		swi	r4, r5, 0
@@ -805,9 +791,8 @@ ex_handler_done:
 		 * Many of these bits are software only. Bits we don't set
 		 * here we (properly should) assume have the appropriate value.
 		 */
+		brid	finish_tlb_load
 		andni	r4, r4, 0x0ce2		/* Make sure 20, 21 are zero */
-
-		bri	finish_tlb_load
 	ex10:
 		/* The bailout. Restore registers to pre-exception conditions
 		 * and call the heavyweights to help us out.
@@ -837,9 +822,9 @@ ex_handler_done:
 		andi	r5, r5, (MICROBLAZE_TLB_SIZE-1)
 		ori	r6, r0, 1
 		cmp	r31, r5, r6
-		blti	r31, sem
+		blti	r31, ex12
 		addik	r5, r6, 1
-	sem:
+	ex12:
 		/* MS: save back current TLB index */
 		swi	r5, r0, TOPHYS(tlb_index)
 
@@ -859,7 +844,6 @@ ex_handler_done:
 		nop
 
 		/* Done...restore registers and get out of here. */
-	ex12:
 		mts	rpid, r11
 		nop
 		bri 4

+ 13 - 2
arch/microblaze/kernel/misc.S

@@ -26,9 +26,10 @@
  * We avoid flushing the pinned 0, 1 and possibly 2 entries.
  */
 .globl _tlbia;
+.type  _tlbia, @function
 .align 4;
 _tlbia:
-	addik	r12, r0, 63 /* flush all entries (63 - 3) */
+	addik	r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
 	/* isync */
 _tlbia_1:
 	mts	rtlbx, r12
@@ -41,11 +42,13 @@ _tlbia_1:
 	/* sync */
 	rtsd	r15, 8
 	nop
+	.size  _tlbia, . - _tlbia
 
 /*
  * Flush MMU TLB for a particular address (in r5)
  */
 .globl _tlbie;
+.type  _tlbie, @function
 .align 4;
 _tlbie:
 	mts	rtlbsx, r5 /* look up the address in TLB */
@@ -59,17 +62,20 @@ _tlbie_1:
 	rtsd	r15, 8
 	nop
 
+	.size  _tlbie, . - _tlbie
+
 /*
  * Allocate TLB entry for early console
 */
 .globl early_console_reg_tlb_alloc;
+.type  early_console_reg_tlb_alloc, @function
 .align 4;
 early_console_reg_tlb_alloc:
 	/*
 	 * Load a TLB entry for the UART, so that microblaze_progress() can use
 	 * the UARTs nice and early.  We use a 4k real==virtual mapping.
 	 */
-	ori	r4, r0, 63
+	ori	r4, r0, MICROBLAZE_TLB_SIZE - 1
 	mts	rtlbx, r4 /* TLB slot 2 */
 
 	or	r4,r5,r0
@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc:
 	rtsd	r15, 8
 	nop
 
+	.size  early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
+
 /*
  * Copy a whole page (4096 bytes).
  */
@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc:
 #define DCACHE_LINE_BYTES (4 * 4)
 
 .globl copy_page;
+.type  copy_page, @function
 .align 4;
 copy_page:
 	ori	r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
@@ -118,3 +127,5 @@ _copy_page_loop:
 	addik	r11, r11, -1
 	rtsd	r15, 8
 	nop
+
+	.size  copy_page, . - copy_page

+ 6 - 4
arch/microblaze/kernel/process.c

@@ -15,6 +15,7 @@
 #include <linux/bitops.h>
 #include <asm/system.h>
 #include <asm/pgalloc.h>
+#include <asm/uaccess.h> /* for USER_DS macros */
 #include <asm/cacheflush.h>
 
 void show_regs(struct pt_regs *regs)
@@ -74,7 +75,10 @@ __setup("hlt", hlt_setup);
 
 void default_idle(void)
 {
-	if (!hlt_counter) {
+	if (likely(hlt_counter)) {
+		while (!need_resched())
+			cpu_relax();
+	} else {
 		clear_thread_flag(TIF_POLLING_NRFLAG);
 		smp_mb__after_clear_bit();
 		local_irq_disable();
@@ -82,9 +86,7 @@ void default_idle(void)
 			cpu_sleep();
 		local_irq_enable();
 		set_thread_flag(TIF_POLLING_NRFLAG);
-	} else
-		while (!need_resched())
-			cpu_relax();
+	}
 }
 
 void cpu_idle(void)

+ 15 - 9
arch/microblaze/kernel/setup.c

@@ -92,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
 }
 #endif	/* CONFIG_MTD_UCLINUX_EBSS */
 
+#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
+#define eprintk early_printk
+#else
+#define eprintk printk
+#endif
+
 void __init machine_early_init(const char *cmdline, unsigned int ram,
 		unsigned int fdt, unsigned int msr)
 {
@@ -139,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
 	setup_early_printk(NULL);
 #endif
 
-	early_printk("Ramdisk addr 0x%08x, ", ram);
+	eprintk("Ramdisk addr 0x%08x, ", ram);
 	if (fdt)
-		early_printk("FDT at 0x%08x\n", fdt);
+		eprintk("FDT at 0x%08x\n", fdt);
 	else
-		early_printk("Compiled-in FDT at 0x%08x\n",
+		eprintk("Compiled-in FDT at 0x%08x\n",
 					(unsigned int)_fdt_start);
 
 #ifdef CONFIG_MTD_UCLINUX
-	early_printk("Found romfs @ 0x%08x (0x%08x)\n",
+	eprintk("Found romfs @ 0x%08x (0x%08x)\n",
 			romfs_base, romfs_size);
-	early_printk("#### klimit %p ####\n", old_klimit);
+	eprintk("#### klimit %p ####\n", old_klimit);
 	BUG_ON(romfs_size < 0); /* What else can we do? */
 
-	early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+	eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
 			romfs_size, romfs_base, (unsigned)&_ebss);
 
-	early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
+	eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
 #endif
 
 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 	if (msr)
-		early_printk("!!!Your kernel has setup MSR instruction but "
+		eprintk("!!!Your kernel has setup MSR instruction but "
 				"CPU don't have it %d\n", msr);
 #else
 	if (!msr)
-		early_printk("!!!Your kernel not setup MSR instruction but "
+		eprintk("!!!Your kernel not setup MSR instruction but "
 				"CPU have it %d\n", msr);
 #endif
 

+ 2 - 4
arch/microblaze/kernel/traps.c

@@ -22,13 +22,11 @@ void trap_init(void)
 	__enable_hw_exceptions();
 }
 
-static int kstack_depth_to_print = 24;
+static unsigned long kstack_depth_to_print = 24;
 
 static int __init kstack_setup(char *s)
 {
-	kstack_depth_to_print = strict_strtoul(s, 0, NULL);
-
-	return 1;
+	return !strict_strtoul(s, 0, &kstack_depth_to_print);
 }
 __setup("kstack=", kstack_setup);
 

+ 1 - 2
arch/microblaze/lib/Makefile

@@ -10,5 +10,4 @@ else
 lib-y += memcpy.o memmove.o
 endif
 
-lib-$(CONFIG_NO_MMU) += uaccess.o
-lib-$(CONFIG_MMU) += uaccess_old.o
+lib-y += uaccess_old.o

+ 5 - 1
arch/microblaze/lib/fastcopy.S

@@ -30,8 +30,9 @@
  */
 
 #include <linux/linkage.h>
-
+	.text
 	.globl	memcpy
+	.type  memcpy, @function
 	.ent	memcpy
 
 memcpy:
@@ -345,9 +346,11 @@ a_done:
 	rtsd	r15, 8
 	nop
 
+.size  memcpy, . - memcpy
 .end memcpy
 /*----------------------------------------------------------------------------*/
 	.globl	memmove
+	.type  memmove, @function
 	.ent	memmove
 
 memmove:
@@ -659,4 +662,5 @@ d_done:
 	rtsd	r15, 8
 	nop
 
+.size  memmove, . - memmove
 .end memmove

+ 1 - 1
arch/microblaze/lib/memcpy.c

@@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
 	const uint32_t *i_src;
 	uint32_t *i_dst;
 
-	if (c >= 4) {
+	if (likely(c >= 4)) {
 		unsigned  value, buf_hold;
 
 		/* Align the dstination to a word boundry. */

+ 8 - 7
arch/microblaze/lib/memset.c

@@ -33,22 +33,23 @@
 #ifdef __HAVE_ARCH_MEMSET
 void *memset(void *v_src, int c, __kernel_size_t n)
 {
-
 	char *src = v_src;
 #ifdef CONFIG_OPT_LIB_FUNCTION
 	uint32_t *i_src;
-	uint32_t w32;
+	uint32_t w32 = 0;
 #endif
 	/* Truncate c to 8 bits */
 	c = (c & 0xFF);
 
 #ifdef CONFIG_OPT_LIB_FUNCTION
-	/* Make a repeating word out of it */
-	w32 = c;
-	w32 |= w32 << 8;
-	w32 |= w32 << 16;
+	if (unlikely(c)) {
+		/* Make a repeating word out of it */
+		w32 = c;
+		w32 |= w32 << 8;
+		w32 |= w32 << 16;
+	}
 
-	if (n >= 4) {
+	if (likely(n >= 4)) {
 		/* Align the destination to a word boundary */
 		/* This is done in an endian independant manner */
 		switch ((unsigned) src & 3) {
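
Note: the memset() hunk above pre-initializes w32 and only builds the repeating fill word when the fill byte is non-zero, which is what the unlikely(c) guard reflects. A rough user-space C sketch of that byte-to-word trick, for reference only; fill_sketch is an invented name and this is not the kernel code:

#include <stddef.h>
#include <stdint.h>

static void *fill_sketch(void *dst, int c, size_t n)
{
	unsigned char *p = dst;
	uint32_t w32 = 0;

	c &= 0xff;
	if (c) {			/* zero already repeats itself */
		w32 = (uint32_t)c;
		w32 |= w32 << 8;	/* 0x000000cc -> 0x0000cccc */
		w32 |= w32 << 16;	/* 0x0000cccc -> 0xcccccccc */
	}

	while (n && ((uintptr_t)p & 3)) {	/* align to a word boundary */
		*p++ = (unsigned char)c;
		n--;
	}
	for (; n >= 4; n -= 4, p += 4)		/* store whole words */
		*(uint32_t *)p = w32;
	while (n--)				/* trailing bytes */
		*p++ = (unsigned char)c;
	return dst;
}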

+ 0 - 48
arch/microblaze/lib/uaccess.c

@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/string.h>
-#include <asm/uaccess.h>
-
-#include <asm/bug.h>
-
-long strnlen_user(const char __user *src, long count)
-{
-	return strlen(src) + 1;
-}
-
-#define __do_strncpy_from_user(dst, src, count, res)			\
-	do {								\
-		char *tmp;						\
-		strncpy(dst, src, count);				\
-		for (tmp = dst; *tmp && count > 0; tmp++, count--)	\
-			;						\
-		res = (tmp - dst);					\
-	} while (0)
-
-long __strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-
-long strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	long res = -EFAULT;
-	if (access_ok(VERIFY_READ, src, 1))
-		__do_strncpy_from_user(dst, src, count, res);
-	return res;
-}
-
-unsigned long __copy_tofrom_user(void __user *to,
-		const void __user *from, unsigned long size)
-{
-	memcpy(to, from, size);
-	return 0;
-}

+ 31 - 14
arch/microblaze/lib/uaccess_old.S

@@ -22,6 +22,7 @@
 
 
 	.text
 .globl __strncpy_user;
+.type  __strncpy_user, @function
 .align 4;
 __strncpy_user:
 
@@ -50,7 +51,7 @@ __strncpy_user:
 3:
 	rtsd	r15,8
 	nop
-
+	.size   __strncpy_user, . - __strncpy_user
 
 	.section	.fixup, "ax"
 	.align	2
@@ -72,6 +73,7 @@ __strncpy_user:
 
 	.text
 .globl __strnlen_user;
+.type  __strnlen_user, @function
 .align 4;
 __strnlen_user:
 	addik	r3,r6,0
@@ -90,7 +92,7 @@ __strnlen_user:
 3:
 	rtsd	r15,8
 	nop
-
+	.size   __strnlen_user, . - __strnlen_user
 
 	.section	.fixup,"ax"
 4:
@@ -108,6 +110,7 @@ __strnlen_user:
  */
 	.text
 .globl __copy_tofrom_user;
+.type  __copy_tofrom_user, @function
 .align 4;
 __copy_tofrom_user:
 	/*
@@ -116,20 +119,34 @@ __copy_tofrom_user:
 	 * r7, r3 - count
 	 * r4 - tempval
 	 */
-	addik	r3,r7,0
-	beqi	r3,3f
-1:
-	lbu	r4,r6,r0
-	addik	r6,r6,1
-2:
-	sb	r4,r5,r0
-	addik	r3,r3,-1
-	bneid	r3,1b
-	addik	r5,r5,1		/* delay slot */
+	beqid	r7, 3f /* zero size is not likely */
+	andi	r3, r7, 0x3 /* filter add count */
+	bneid	r3, 4f /* if is odd value then byte copying */
+	or	r3, r5, r6 /* find if is any to/from unaligned */
+	andi	r3, r3, 0x3 /* mask unaligned */
+	bneid	r3, 1f /* it is unaligned -> then jump */
+	or	r3, r0, r0
+
+/* at least one 4 byte copy */
+5:	lw	r4, r6, r3
+6:	sw	r4, r5, r3
+	addik	r7, r7, -4
+	bneid	r7, 5b
+	addik	r3, r3, 4
+	addik	r3, r7, 0
+	rtsd	r15, 8
+	nop
+4:	or	r3, r0, r0
+1:	lbu	r4,r6,r3
+2:	sb	r4,r5,r3
+	addik	r7,r7,-1
+	bneid	r7,1b
+	addik	r3,r3,1		/* delay slot */
 3:
+	addik	r3,r7,0
 	rtsd	r15,8
 	nop
-
+	.size   __copy_tofrom_user, . - __copy_tofrom_user
 
 	.section	__ex_table,"a"
-	.word	1b,3b,2b,3b
+	.word	1b,3b,2b,3b,5b,3b,6b,3b
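
For reference: the rewritten __copy_tofrom_user above copies a word at a time when the count is a multiple of four and both pointers are word-aligned, and drops back to the old byte-by-byte loop otherwise, with r3 returning the number of bytes left uncopied. A rough C equivalent of that control flow; copy_sketch is an invented name and none of the __ex_table fault handling is modelled:

#include <stdint.h>

static unsigned long copy_sketch(void *to, const void *from, unsigned long n)
{
	unsigned char *d = to;
	const unsigned char *s = from;

	if (!n)
		return 0;

	if ((n & 3) == 0 && (((uintptr_t)d | (uintptr_t)s) & 3) == 0) {
		for (; n; n -= 4, d += 4, s += 4)	/* aligned word copy */
			*(uint32_t *)d = *(const uint32_t *)s;
		return 0;				/* nothing left over */
	}

	while (n--)					/* odd sizes or unaligned pointers */
		*d++ = *s++;
	return 0;
}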

+ 12 - 12
arch/microblaze/mm/fault.c

@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	regs->esr = error_code;
 	regs->esr = error_code;
 
 
 	/* On a kernel SLB miss we can only check for a valid exception entry */
 	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (kernel_mode(regs) && (address >= TASK_SIZE)) {
+	if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
 		printk(KERN_WARNING "kernel task_size exceed");
 		printk(KERN_WARNING "kernel task_size exceed");
 		_exception(SIGSEGV, regs, code, address);
 		_exception(SIGSEGV, regs, code, address);
 	}
 	}
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 	}
 #endif /* CONFIG_KGDB */
 #endif /* CONFIG_KGDB */
 
 
-	if (in_atomic() || !mm) {
+	if (unlikely(in_atomic() || !mm)) {
 		if (kernel_mode(regs))
 		if (kernel_mode(regs))
 			goto bad_area_nosemaphore;
 			goto bad_area_nosemaphore;
 
 
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * source.  If this is invalid we can skip the address space check,
 	 * source.  If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 * thus avoiding the deadlock.
 	 */
 	 */
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 			goto bad_area_nosemaphore;
 			goto bad_area_nosemaphore;
 
 
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 	}
 
 
 	vma = find_vma(mm, address);
 	vma = find_vma(mm, address);
-	if (!vma)
+	if (unlikely(!vma))
 		goto bad_area;
 		goto bad_area;
 
 
 	if (vma->vm_start <= address)
 	if (vma->vm_start <= address)
 		goto good_area;
 		goto good_area;
 
 
-	if (!(vma->vm_flags & VM_GROWSDOWN))
+	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
 		goto bad_area;
 		goto bad_area;
 
 
-	if (!is_write)
+	if (unlikely(!is_write))
 		goto bad_area;
 		goto bad_area;
 
 
 	/*
 	/*
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * before setting the user r1.  Thus we allow the stack to
 	 * before setting the user r1.  Thus we allow the stack to
 	 * expand to 1MB without further checks.
 	 * expand to 1MB without further checks.
 	 */
 	 */
-	if (address + 0x100000 < vma->vm_end) {
+	if (unlikely(address + 0x100000 < vma->vm_end)) {
 
 
 		/* get user regs even if this fault is in kernel mode */
 		/* get user regs even if this fault is in kernel mode */
 		struct pt_regs *uregs = current->thread.regs;
 		struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@ good_area:
 	code = SEGV_ACCERR;
 	code = SEGV_ACCERR;
 
 
 	/* a write */
 	/* a write */
-	if (is_write) {
-		if (!(vma->vm_flags & VM_WRITE))
+	if (unlikely(is_write)) {
+		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			goto bad_area;
 			goto bad_area;
 	/* a read */
 	/* a read */
 	} else {
 	} else {
 		/* protection fault */
 		/* protection fault */
-		if (error_code & 0x08000000)
+		if (unlikely(error_code & 0x08000000))
 			goto bad_area;
 			goto bad_area;
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
 			goto bad_area;
 			goto bad_area;
 	}
 	}
 
 
@@ -235,7 +235,7 @@ survive:
 			goto do_sigbus;
 			goto do_sigbus;
 		BUG();
 		BUG();
 	}
 	}
-	if (fault & VM_FAULT_MAJOR)
+	if (unlikely(fault & VM_FAULT_MAJOR))
 		current->maj_flt++;
 		current->maj_flt++;
 	else
 	else
 		current->min_flt++;
 		current->min_flt++;

+ 0 - 9
arch/microblaze/mm/init.c

@@ -165,7 +165,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)addr, 0xcc, PAGE_SIZE);
 		free_page(addr);
 		free_page(addr);
 		totalram_pages++;
 		totalram_pages++;
 	}
 	}
@@ -208,14 +207,6 @@ void __init mem_init(void)
 }
 }
 
 
 #ifndef CONFIG_MMU
 #ifndef CONFIG_MMU
-/* Check against bounds of physical memory */
-int ___range_ok(unsigned long addr, unsigned long size)
-{
-	return ((addr < memory_start) ||
-		((addr + size) > memory_end));
-}
-EXPORT_SYMBOL(___range_ok);
-
 int page_is_ram(unsigned long pfn)
 int page_is_ram(unsigned long pfn)
 {
 {
 	return __range_ok(pfn, 0);
 	return __range_ok(pfn, 0);

+ 1 - 1
arch/microblaze/mm/pgtable.c

@@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 		err = 0;
 		err = 0;
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 				__pgprot(flags)));
 				__pgprot(flags)));
-		if (mem_init_done)
+		if (unlikely(mem_init_done))
 			flush_HPTE(0, va, pmd_val(*pd));
 			flush_HPTE(0, va, pmd_val(*pd));
 			/* flush_HPTE(0, va, pg); */
 			/* flush_HPTE(0, va, pg); */
 	}
 	}

+ 2 - 0
arch/powerpc/include/asm/asm-compat.h

@@ -28,6 +28,7 @@
 #define PPC_LLARX(t, a, b, eh)	PPC_LDARX(t, a, b, eh)
 #define PPC_STLCX	stringify_in_c(stdcx.)
 #define PPC_CNTLZL	stringify_in_c(cntlzd)
+#define PPC_LR_STKOFF	16
 
 /* Move to CR, single-entry optimized version. Only available
  * on POWER4 and later.
@@ -51,6 +52,7 @@
 #define PPC_STLCX	stringify_in_c(stwcx.)
 #define PPC_CNTLZL	stringify_in_c(cntlzw)
 #define PPC_MTOCRF	stringify_in_c(mtcrf)
+#define PPC_LR_STKOFF	4
 
 #endif
 
 

+ 26 - 0
arch/powerpc/kernel/misc.S

@@ -127,3 +127,29 @@ _GLOBAL(__setup_cpu_power7)
 _GLOBAL(__restore_cpu_power7)
 	/* place holder */
 	blr
+
+/*
+ * Get a minimal set of registers for our caller's nth caller.
+ * r3 = regs pointer, r5 = n.
+ *
+ * We only get R1 (stack pointer), NIP (next instruction pointer)
+ * and LR (link register).  These are all we can get in the
+ * general case without doing complicated stack unwinding, but
+ * fortunately they are enough to do a stack backtrace, which
+ * is all we need them for.
+ */
+_GLOBAL(perf_arch_fetch_caller_regs)
+	mr	r6,r1
+	cmpwi	r5,0
+	mflr	r4
+	ble	2f
+	mtctr	r5
+1:	PPC_LL	r6,0(r6)
+	bdnz	1b
+	PPC_LL	r4,PPC_LR_STKOFF(r6)
+2:	PPC_LL	r7,0(r6)
+	PPC_LL	r7,PPC_LR_STKOFF(r7)
+	PPC_STL	r6,GPR1-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r4,_NIP-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r7,_LINK-STACK_FRAME_OVERHEAD(r3)
+	blr
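
For reference, a rough C rendering of what perf_arch_fetch_caller_regs above does: follow the stack back chain past the requested number of frames, then record that frame's stack pointer, the return address saved for it, and the next saved return address up the chain. fetch_caller_sketch, struct mini_regs and SAVED_LR_OFF are invented names; SAVED_LR_OFF stands in for the new PPC_LR_STKOFF (16 on 64-bit, 4 on 32-bit):

struct mini_regs {
	unsigned long gpr1;	/* stack pointer of the chosen frame */
	unsigned long nip;	/* return address found for that frame */
	unsigned long link;	/* its caller's own return address */
};

#define SAVED_LR_OFF 16		/* assumed 64-bit frame layout */

static void fetch_caller_sketch(struct mini_regs *regs, unsigned long sp,
				unsigned long lr, int skip)
{
	unsigned long frame = sp;
	unsigned long ret = lr;

	while (skip-- > 0) {			/* walk the back chain */
		frame = *(unsigned long *)frame;
		ret = *(unsigned long *)(frame + SAVED_LR_OFF);
	}

	regs->gpr1 = frame;
	regs->nip = ret;
	/* saved LR one frame further up gives the caller's caller */
	regs->link = *(unsigned long *)(*(unsigned long *)frame + SAVED_LR_OFF);
}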

+ 2 - 0
arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c

@@ -481,6 +481,8 @@ mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match)
 	if (rc)
 	if (rc)
 		goto err_bcom_rx_irq;
 		goto err_bcom_rx_irq;
 
 
+	lpbfifo.dma_irqs_enabled = 1;
+
 	/* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
 	/* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
 	lpbfifo.bcom_tx_task =
 	lpbfifo.bcom_tx_task =
 		bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
 		bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,

+ 3 - 0
arch/sh/kernel/return_address.c

@@ -9,6 +9,7 @@
  * for more details.
  * for more details.
  */
  */
 #include <linux/kernel.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <asm/dwarf.h>
 #include <asm/dwarf.h>
 
 
 #ifdef CONFIG_DWARF_UNWINDER
 #ifdef CONFIG_DWARF_UNWINDER
@@ -52,3 +53,5 @@ void *return_address(unsigned int depth)
 }
 }
 
 
 #endif
 #endif
+
+EXPORT_SYMBOL_GPL(return_address);

+ 28 - 0
arch/sh/mm/tlb-pteaex.c

@@ -77,3 +77,31 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
 	__raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
 	__raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
 	back_to_cached();
 	back_to_cached();
 }
 }
+
+void local_flush_tlb_all(void)
+{
+	unsigned long flags, status;
+	int i;
+
+	/*
+	 * Flush all the TLB.
+	 */
+	local_irq_save(flags);
+	jump_to_uncached();
+
+	status = __raw_readl(MMUCR);
+	status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
+
+	if (status == 0)
+		status = MMUCR_URB_NENTRIES;
+
+	for (i = 0; i < status; i++)
+		__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
+
+	for (i = 0; i < 4; i++)
+		__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
+
+	back_to_cached();
+	ctrl_barrier();
+	local_irq_restore(flags);
+}

+ 19 - 0
arch/sh/mm/tlb-sh3.c

@@ -77,3 +77,22 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
 	for (i = 0; i < ways; i++)
 	for (i = 0; i < ways; i++)
 		__raw_writel(data, addr + (i << 8));
 		__raw_writel(data, addr + (i << 8));
 }
 }
+
+void local_flush_tlb_all(void)
+{
+	unsigned long flags, status;
+
+	/*
+	 * Flush all the TLB.
+	 *
+	 * Write to the MMU control register's bit:
+	 *	TF-bit for SH-3, TI-bit for SH-4.
+	 *      It's same position, bit #2.
+	 */
+	local_irq_save(flags);
+	status = __raw_readl(MMUCR);
+	status |= 0x04;
+	__raw_writel(status, MMUCR);
+	ctrl_barrier();
+	local_irq_restore(flags);
+}

+ 28 - 0
arch/sh/mm/tlb-sh4.c

@@ -80,3 +80,31 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
 	__raw_writel(data, addr);
 	__raw_writel(data, addr);
 	back_to_cached();
 	back_to_cached();
 }
 }
+
+void local_flush_tlb_all(void)
+{
+	unsigned long flags, status;
+	int i;
+
+	/*
+	 * Flush all the TLB.
+	 */
+	local_irq_save(flags);
+	jump_to_uncached();
+
+	status = __raw_readl(MMUCR);
+	status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
+
+	if (status == 0)
+		status = MMUCR_URB_NENTRIES;
+
+	for (i = 0; i < status; i++)
+		__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
+
+	for (i = 0; i < 4; i++)
+		__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
+
+	back_to_cached();
+	ctrl_barrier();
+	local_irq_restore(flags);
+}

+ 0 - 28
arch/sh/mm/tlbflush_32.c

@@ -119,31 +119,3 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 		local_irq_restore(flags);
 		local_irq_restore(flags);
 	}
 	}
 }
 }
-
-void local_flush_tlb_all(void)
-{
-	unsigned long flags, status;
-	int i;
-
-	/*
-	 * Flush all the TLB.
-	 */
-	local_irq_save(flags);
-	jump_to_uncached();
-
-	status = __raw_readl(MMUCR);
-	status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
-
-	if (status == 0)
-		status = MMUCR_URB_NENTRIES;
-
-	for (i = 0; i < status; i++)
-		__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
-
-	for (i = 0; i < 4; i++)
-		__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
-
-	back_to_cached();
-	ctrl_barrier();
-	local_irq_restore(flags);
-}

+ 16 - 12
arch/sparc/configs/sparc64_defconfig

@@ -1,7 +1,7 @@
 #
 #
 # Automatically generated make config: don't edit
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33
-# Wed Mar  3 02:54:29 2010
+# Linux kernel version: 2.6.34-rc3
+# Sat Apr  3 15:49:56 2010
 #
 #
 CONFIG_64BIT=y
 CONFIG_64BIT=y
 CONFIG_SPARC=y
 CONFIG_SPARC=y
@@ -23,6 +23,7 @@ CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
 CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
 CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_MMU=y
 CONFIG_MMU=y
+CONFIG_NEED_DMA_MAP_STATE=y
 CONFIG_ARCH_NO_VIRT_TO_BUS=y
 CONFIG_ARCH_NO_VIRT_TO_BUS=y
 CONFIG_OF=y
 CONFIG_OF=y
 CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
 CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
@@ -439,6 +440,7 @@ CONFIG_MISC_DEVICES=y
 # CONFIG_ENCLOSURE_SERVICES is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
 # CONFIG_HP_ILO is not set
 # CONFIG_HP_ILO is not set
 # CONFIG_ISL29003 is not set
 # CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_DS1682 is not set
 # CONFIG_DS1682 is not set
 # CONFIG_C2PORT is not set
 # CONFIG_C2PORT is not set
 
 
@@ -511,6 +513,7 @@ CONFIG_BLK_DEV_IDEDMA=y
 #
 #
 # SCSI device support
 # SCSI device support
 #
 #
+CONFIG_SCSI_MOD=y
 CONFIG_RAID_ATTRS=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_SCSI=y
 CONFIG_SCSI_DMA=y
 CONFIG_SCSI_DMA=y
@@ -888,6 +891,7 @@ CONFIG_SERIAL_SUNHV=y
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
 # CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
 # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
 # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
 CONFIG_UNIX98_PTYS=y
 CONFIG_UNIX98_PTYS=y
 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
@@ -935,6 +939,7 @@ CONFIG_I2C_ALGOBIT=y
 #
 #
 # CONFIG_I2C_OCORES is not set
 # CONFIG_I2C_OCORES is not set
 # CONFIG_I2C_SIMTEC is not set
 # CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
 
 
 #
 #
 # External I2C/SMBus adapter drivers
 # External I2C/SMBus adapter drivers
@@ -948,15 +953,9 @@ CONFIG_I2C_ALGOBIT=y
 #
 #
 # CONFIG_I2C_PCA_PLATFORM is not set
 # CONFIG_I2C_PCA_PLATFORM is not set
 # CONFIG_I2C_STUB is not set
 # CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
 # CONFIG_SPI is not set
 # CONFIG_SPI is not set
 
 
 #
 #
@@ -982,10 +981,11 @@ CONFIG_HWMON=y
 # CONFIG_SENSORS_ADM1029 is not set
 # CONFIG_SENSORS_ADM1029 is not set
 # CONFIG_SENSORS_ADM1031 is not set
 # CONFIG_SENSORS_ADM1031 is not set
 # CONFIG_SENSORS_ADM9240 is not set
 # CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
 # CONFIG_SENSORS_ADT7462 is not set
 # CONFIG_SENSORS_ADT7462 is not set
 # CONFIG_SENSORS_ADT7470 is not set
 # CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
 # CONFIG_SENSORS_ADT7475 is not set
 # CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
 # CONFIG_SENSORS_ATXP1 is not set
 # CONFIG_SENSORS_ATXP1 is not set
 # CONFIG_SENSORS_DS1621 is not set
 # CONFIG_SENSORS_DS1621 is not set
 # CONFIG_SENSORS_I5K_AMB is not set
 # CONFIG_SENSORS_I5K_AMB is not set
@@ -1052,18 +1052,21 @@ CONFIG_SSB_POSSIBLE=y
 # Multifunction device drivers
 # Multifunction device drivers
 #
 #
 # CONFIG_MFD_CORE is not set
 # CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
 # CONFIG_HTC_PASIC3 is not set
 # CONFIG_TWL4030_CORE is not set
 # CONFIG_TWL4030_CORE is not set
 # CONFIG_MFD_TMIO is not set
 # CONFIG_MFD_TMIO is not set
 # CONFIG_PMIC_DA903X is not set
 # CONFIG_PMIC_DA903X is not set
 # CONFIG_PMIC_ADP5520 is not set
 # CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
 # CONFIG_MFD_WM8400 is not set
 # CONFIG_MFD_WM8400 is not set
 # CONFIG_MFD_WM831X is not set
 # CONFIG_MFD_WM831X is not set
 # CONFIG_MFD_WM8350_I2C is not set
 # CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
 # CONFIG_MFD_PCF50633 is not set
 # CONFIG_MFD_PCF50633 is not set
 # CONFIG_AB3100_CORE is not set
 # CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
+# CONFIG_LPC_SCH is not set
 # CONFIG_REGULATOR is not set
 # CONFIG_REGULATOR is not set
 # CONFIG_MEDIA_SUPPORT is not set
 # CONFIG_MEDIA_SUPPORT is not set
 
 
@@ -1113,6 +1116,7 @@ CONFIG_FB_FFB=y
 # CONFIG_FB_LEO is not set
 # CONFIG_FB_LEO is not set
 CONFIG_FB_XVR500=y
 CONFIG_FB_XVR500=y
 CONFIG_FB_XVR2500=y
 CONFIG_FB_XVR2500=y
+CONFIG_FB_XVR1000=y
 # CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_NVIDIA is not set
 # CONFIG_FB_NVIDIA is not set
 # CONFIG_FB_RIVA is not set
 # CONFIG_FB_RIVA is not set
@@ -1430,7 +1434,6 @@ CONFIG_USB_STORAGE=m
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
 # CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
 # CONFIG_USB_LED is not set
 # CONFIG_USB_LED is not set
 # CONFIG_USB_CYPRESS_CY7C63 is not set
 # CONFIG_USB_CYPRESS_CY7C63 is not set
 # CONFIG_USB_CYTHERM is not set
 # CONFIG_USB_CYTHERM is not set
@@ -1443,7 +1446,6 @@ CONFIG_USB_STORAGE=m
 # CONFIG_USB_IOWARRIOR is not set
 # CONFIG_USB_IOWARRIOR is not set
 # CONFIG_USB_TEST is not set
 # CONFIG_USB_TEST is not set
 # CONFIG_USB_ISIGHTFW is not set
 # CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
 # CONFIG_USB_GADGET is not set
 # CONFIG_USB_GADGET is not set
 
 
 #
 #
@@ -1610,6 +1612,7 @@ CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_BEFS_FS is not set
 # CONFIG_BEFS_FS is not set
 # CONFIG_BFS_FS is not set
 # CONFIG_BFS_FS is not set
 # CONFIG_EFS_FS is not set
 # CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_SQUASHFS is not set
 # CONFIG_SQUASHFS is not set
 # CONFIG_VXFS_FS is not set
 # CONFIG_VXFS_FS is not set
@@ -1624,6 +1627,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
 # CONFIG_NFS_FS is not set
 # CONFIG_NFS_FS is not set
 # CONFIG_NFSD is not set
 # CONFIG_NFSD is not set
 # CONFIG_SMB_FS is not set
 # CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
 # CONFIG_CIFS is not set
 # CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
 # CONFIG_CODA_FS is not set

+ 2 - 2
arch/sparc/include/asm/stat.h

@@ -53,8 +53,8 @@ struct stat {
 	ino_t		st_ino;
 	ino_t		st_ino;
 	mode_t		st_mode;
 	mode_t		st_mode;
 	short		st_nlink;
 	short		st_nlink;
-	uid16_t		st_uid;
-	gid16_t		st_gid;
+	unsigned short	st_uid;
+	unsigned short	st_gid;
 	unsigned short	st_rdev;
 	unsigned short	st_rdev;
 	off_t		st_size;
 	off_t		st_size;
 	time_t		st_atime;
 	time_t		st_atime;

+ 75 - 0
arch/sparc/kernel/helpers.S

@@ -46,6 +46,81 @@ stack_trace_flush:
 	 nop
 	 nop
 	.size		stack_trace_flush,.-stack_trace_flush
 	.size		stack_trace_flush,.-stack_trace_flush
 
 
+#ifdef CONFIG_PERF_EVENTS
+	.globl		perf_arch_fetch_caller_regs
+	.type		perf_arch_fetch_caller_regs,#function
+perf_arch_fetch_caller_regs:
+	/* We always read the %pstate into %o5 since we will use
+	 * that to construct a fake %tstate to store into the regs.
+	 */
+	rdpr		%pstate, %o5
+	brz,pn		%o2, 50f
+	 mov		%o2, %g7
+
+	/* Turn off interrupts while we walk around the register
+	 * window by hand.
+	 */
+	wrpr		%o5, PSTATE_IE, %pstate
+
+	/* The %canrestore tells us how many register windows are
+	 * still live in the chip above us, past that we have to
+	 * walk the frame as saved on the stack.   We stash away
+	 * the %cwp in %g1 so we can return back to the original
+	 * register window.
+	 */
+	rdpr		%cwp, %g1
+	rdpr		%canrestore, %g2
+	sub		%g1, 1, %g3
+
+	/* We have the skip count in %g7, if it hits zero then
+	 * %fp/%i7 are the registers we need.  Otherwise if our
+	 * %canrestore count maintained in %g2 hits zero we have
+	 * to start traversing the stack.
+	 */
+10:	brz,pn		%g2, 4f
+	 sub		%g2, 1, %g2
+	wrpr		%g3, %cwp
+	subcc		%g7, 1, %g7
+	bne,pt		%xcc, 10b
+	 sub		%g3, 1, %g3
+
+	/* We found the values we need in the cpu's register
+	 * windows.
+	 */
+	mov		%fp, %g3
+	ba,pt		%xcc, 3f
+	 mov		%i7, %g2
+
+50:	mov		%fp, %g3
+	ba,pt		%xcc, 2f
+	 mov		%i7, %g2
+
+	/* We hit the end of the valid register windows in the
+	 * cpu, start traversing the stack frame.
+	 */
+4:	mov		%fp, %g3
+
+20:	ldx		[%g3 + STACK_BIAS + RW_V9_I7], %g2
+	subcc		%g7, 1, %g7
+	bne,pn		%xcc, 20b
+	 ldx		[%g3 + STACK_BIAS + RW_V9_I6], %g3
+
+	/* Restore the current register window position and
+	 * re-enable interrupts.
+	 */
+3:	wrpr		%g1, %cwp
+	wrpr		%o5, %pstate
+
+2:	stx		%g3, [%o0 + PT_V9_FP]
+	sllx		%o5, 8, %o5
+	stx		%o5, [%o0 + PT_V9_TSTATE]
+	stx		%g2, [%o0 + PT_V9_TPC]
+	add		%g2, 4, %g2
+	retl
+	 stx		%g2, [%o0 + PT_V9_TNPC]
+	.size		perf_arch_fetch_caller_regs,.-perf_arch_fetch_caller_regs
+#endif /* CONFIG_PERF_EVENTS */
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_SMP
 	.globl		hard_smp_processor_id
 	.globl		hard_smp_processor_id
 	.type		hard_smp_processor_id,#function
 	.type		hard_smp_processor_id,#function

+ 1 - 1
arch/sparc/kernel/perf_event.c

@@ -1337,7 +1337,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, regs->tpc);
 	callchain_store(entry, regs->tpc);
 
 
-	ufp = regs->u_regs[UREG_I6];
+	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
 	do {
 	do {
 		struct sparc_stackf32 *usf, sf;
 		struct sparc_stackf32 *usf, sf;
 		unsigned long pc;
 		unsigned long pc;

+ 4 - 0
arch/sparc/kernel/ptrace_32.c

@@ -65,6 +65,7 @@ static int genregs32_get(struct task_struct *target,
 			*k++ = regs->u_regs[pos++];
 			*k++ = regs->u_regs[pos++];
 
 
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		for (; count > 0 && pos < 32; count--) {
 		for (; count > 0 && pos < 32; count--) {
 			if (get_user(*k++, &reg_window[pos++]))
 			if (get_user(*k++, &reg_window[pos++]))
 				return -EFAULT;
 				return -EFAULT;
@@ -76,6 +77,7 @@ static int genregs32_get(struct task_struct *target,
 		}
 		}
 
 
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		for (; count > 0 && pos < 32; count--) {
 		for (; count > 0 && pos < 32; count--) {
 			if (get_user(reg, &reg_window[pos++]) ||
 			if (get_user(reg, &reg_window[pos++]) ||
 			    put_user(reg, u++))
 			    put_user(reg, u++))
@@ -141,6 +143,7 @@ static int genregs32_set(struct task_struct *target,
 			regs->u_regs[pos++] = *k++;
 			regs->u_regs[pos++] = *k++;
 
 
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		for (; count > 0 && pos < 32; count--) {
 		for (; count > 0 && pos < 32; count--) {
 			if (put_user(*k++, &reg_window[pos++]))
 			if (put_user(*k++, &reg_window[pos++]))
 				return -EFAULT;
 				return -EFAULT;
@@ -153,6 +156,7 @@ static int genregs32_set(struct task_struct *target,
 		}
 		}
 
 
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
 		reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		for (; count > 0 && pos < 32; count--) {
 		for (; count > 0 && pos < 32; count--) {
 			if (get_user(reg, u++) ||
 			if (get_user(reg, u++) ||
 			    put_user(reg, &reg_window[pos++]))
 			    put_user(reg, &reg_window[pos++]))

+ 4 - 0
arch/sparc/kernel/ptrace_64.c

@@ -492,6 +492,7 @@ static int genregs32_get(struct task_struct *target,
 			*k++ = regs->u_regs[pos++];
 			*k++ = regs->u_regs[pos++];
 
 
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		if (target == current) {
 		if (target == current) {
 			for (; count > 0 && pos < 32; count--) {
 			for (; count > 0 && pos < 32; count--) {
 				if (get_user(*k++, &reg_window[pos++]))
 				if (get_user(*k++, &reg_window[pos++]))
@@ -516,6 +517,7 @@ static int genregs32_get(struct task_struct *target,
 		}
 		}
 
 
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		if (target == current) {
 		if (target == current) {
 			for (; count > 0 && pos < 32; count--) {
 			for (; count > 0 && pos < 32; count--) {
 				if (get_user(reg, &reg_window[pos++]) ||
 				if (get_user(reg, &reg_window[pos++]) ||
@@ -599,6 +601,7 @@ static int genregs32_set(struct task_struct *target,
 			regs->u_regs[pos++] = *k++;
 			regs->u_regs[pos++] = *k++;
 
 
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		if (target == current) {
 		if (target == current) {
 			for (; count > 0 && pos < 32; count--) {
 			for (; count > 0 && pos < 32; count--) {
 				if (put_user(*k++, &reg_window[pos++]))
 				if (put_user(*k++, &reg_window[pos++]))
@@ -625,6 +628,7 @@ static int genregs32_set(struct task_struct *target,
 		}
 		}
 
 
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
 		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+		reg_window -= 16;
 		if (target == current) {
 		if (target == current) {
 			for (; count > 0 && pos < 32; count--) {
 			for (; count > 0 && pos < 32; count--) {
 				if (get_user(reg, u++) ||
 				if (get_user(reg, u++) ||

+ 2 - 2
arch/sparc/kernel/sysfs.c

@@ -107,12 +107,12 @@ static unsigned long run_on_cpu(unsigned long cpu,
 	unsigned long ret;
 	unsigned long ret;
 
 
 	/* should return -EINVAL to userspace */
 	/* should return -EINVAL to userspace */
-	if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
+	if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
 		return 0;
 		return 0;
 
 
 	ret = func(arg);
 	ret = func(arg);
 
 
-	set_cpus_allowed(current, old_affinity);
+	set_cpus_allowed_ptr(current, &old_affinity);
 
 
 	return ret;
 	return ret;
 }
 }

+ 4 - 4
arch/sparc/kernel/us2e_cpufreq.c

@@ -238,12 +238,12 @@ static unsigned int us2e_freq_get(unsigned int cpu)
 		return 0;
 		return 0;
 
 
 	cpus_allowed = current->cpus_allowed;
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
 
 	clock_tick = sparc64_get_clock_tick(cpu) / 1000;
 	clock_tick = sparc64_get_clock_tick(cpu) / 1000;
 	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
 	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
 
 
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 
 	return clock_tick / estar_to_divisor(estar);
 	return clock_tick / estar_to_divisor(estar);
 }
 }
@@ -259,7 +259,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
 		return;
 		return;
 
 
 	cpus_allowed = current->cpus_allowed;
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
 
 	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
 	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
 	new_bits = index_to_estar_mode(index);
 	new_bits = index_to_estar_mode(index);
@@ -281,7 +281,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
 
 
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 
 
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 }
 }
 
 
 static int us2e_freq_target(struct cpufreq_policy *policy,
 static int us2e_freq_target(struct cpufreq_policy *policy,

+ 4 - 4
arch/sparc/kernel/us3_cpufreq.c

@@ -86,12 +86,12 @@ static unsigned int us3_freq_get(unsigned int cpu)
 		return 0;
 		return 0;
 
 
 	cpus_allowed = current->cpus_allowed;
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
 
 	reg = read_safari_cfg();
 	reg = read_safari_cfg();
 	ret = get_current_freq(cpu, reg);
 	ret = get_current_freq(cpu, reg);
 
 
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 
 	return ret;
 	return ret;
 }
 }
@@ -106,7 +106,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
 		return;
 		return;
 
 
 	cpus_allowed = current->cpus_allowed;
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
 
 	new_freq = sparc64_get_clock_tick(cpu) / 1000;
 	new_freq = sparc64_get_clock_tick(cpu) / 1000;
 	switch (index) {
 	switch (index) {
@@ -140,7 +140,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
 
 
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 
 
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 }
 }
 
 
 static int us3_freq_target(struct cpufreq_policy *policy,
 static int us3_freq_target(struct cpufreq_policy *policy,

+ 1 - 1
arch/sparc/mm/init_64.c

@@ -2117,7 +2117,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 			       "node=%d entry=%lu/%lu\n", start, block, nr,
 			       "node=%d entry=%lu/%lu\n", start, block, nr,
 			       node,
 			       node,
 			       addr >> VMEMMAP_CHUNK_SHIFT,
 			       addr >> VMEMMAP_CHUNK_SHIFT,
-			       VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
+			       VMEMMAP_SIZE);
 		}
 		}
 	}
 	}
 	return 0;
 	return 0;

+ 3 - 3
arch/x86/include/asm/fixmap.h

@@ -82,6 +82,9 @@ enum fixed_addresses {
 #endif
 #endif
 	FIX_DBGP_BASE,
 	FIX_DBGP_BASE,
 	FIX_EARLYCON_MEM_BASE,
 	FIX_EARLYCON_MEM_BASE,
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+	FIX_OHCI1394_BASE,
+#endif
 #ifdef CONFIG_X86_LOCAL_APIC
 #ifdef CONFIG_X86_LOCAL_APIC
 	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
 	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
 #endif
 #endif
@@ -132,9 +135,6 @@ enum fixed_addresses {
 	   (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
 	   (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
 	 : __end_of_permanent_fixed_addresses,
 	 : __end_of_permanent_fixed_addresses,
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
-	FIX_OHCI1394_BASE,
-#endif
 #ifdef CONFIG_X86_32
 #ifdef CONFIG_X86_32
 	FIX_WP_TEST,
 	FIX_WP_TEST,
 #endif
 #endif

+ 1 - 0
arch/x86/include/asm/hw_irq.h

@@ -133,6 +133,7 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
 
 
 typedef int vector_irq_t[NR_VECTORS];
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
+extern void setup_vector_irq(int cpu);
 
 
 #ifdef CONFIG_X86_IO_APIC
 #ifdef CONFIG_X86_IO_APIC
 extern void lock_vector_lock(void);
 extern void lock_vector_lock(void);

+ 2 - 0
arch/x86/include/asm/msr-index.h

@@ -105,6 +105,8 @@
 #define MSR_AMD64_PATCH_LEVEL		0x0000008b
 #define MSR_AMD64_PATCH_LEVEL		0x0000008b
 #define MSR_AMD64_NB_CFG		0xc001001f
 #define MSR_AMD64_NB_CFG		0xc001001f
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
+#define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
+#define MSR_AMD64_OSVW_STATUS		0xc0010141
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
 #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032
 #define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032

+ 8 - 0
arch/x86/kernel/apic/io_apic.c

@@ -1268,6 +1268,14 @@ void __setup_vector_irq(int cpu)
 	/* Mark the inuse vectors */
 	/* Mark the inuse vectors */
 	for_each_irq_desc(irq, desc) {
 	for_each_irq_desc(irq, desc) {
 		cfg = desc->chip_data;
 		cfg = desc->chip_data;
+
+		/*
+		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
+		 * will be part of the irq_cfg's domain.
+		 */
+		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
+			cpumask_set_cpu(cpu, cfg->domain);
+
 		if (!cpumask_test_cpu(cpu, cfg->domain))
 		if (!cpumask_test_cpu(cpu, cfg->domain))
 			continue;
 			continue;
 		vector = cfg->vector;
 		vector = cfg->vector;

+ 44 - 10
arch/x86/kernel/cpu/perf_event.c

@@ -28,6 +28,7 @@
 #include <asm/apic.h>
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 #include <asm/nmi.h>
+#include <asm/compat.h>
 
 
 static u64 perf_event_mask __read_mostly;
 static u64 perf_event_mask __read_mostly;
 
 
@@ -158,7 +159,7 @@ struct x86_pmu {
 						 struct perf_event *event);
 						 struct perf_event *event);
 	struct event_constraint *event_constraints;
 	struct event_constraint *event_constraints;
 
 
-	void		(*cpu_prepare)(int cpu);
+	int		(*cpu_prepare)(int cpu);
 	void		(*cpu_starting)(int cpu);
 	void		(*cpu_starting)(int cpu);
 	void		(*cpu_dying)(int cpu);
 	void		(*cpu_dying)(int cpu);
 	void		(*cpu_dead)(int cpu);
 	void		(*cpu_dead)(int cpu);
@@ -1333,11 +1334,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 {
 	unsigned int cpu = (long)hcpu;
 	unsigned int cpu = (long)hcpu;
+	int ret = NOTIFY_OK;
 
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE:
 		if (x86_pmu.cpu_prepare)
 		if (x86_pmu.cpu_prepare)
-			x86_pmu.cpu_prepare(cpu);
+			ret = x86_pmu.cpu_prepare(cpu);
 		break;
 		break;
 
 
 	case CPU_STARTING:
 	case CPU_STARTING:
@@ -1350,6 +1352,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 			x86_pmu.cpu_dying(cpu);
 			x86_pmu.cpu_dying(cpu);
 		break;
 		break;
 
 
+	case CPU_UP_CANCELED:
 	case CPU_DEAD:
 	case CPU_DEAD:
 		if (x86_pmu.cpu_dead)
 		if (x86_pmu.cpu_dead)
 			x86_pmu.cpu_dead(cpu);
 			x86_pmu.cpu_dead(cpu);
@@ -1359,7 +1362,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		break;
 		break;
 	}
 	}
 
 
-	return NOTIFY_OK;
+	return ret;
 }
 }
 
 
 static void __init pmu_check_apic(void)
 static void __init pmu_check_apic(void)
@@ -1628,14 +1631,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	return len;
 	return len;
 }
 }
 
 
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+#ifdef CONFIG_COMPAT
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
 {
-	unsigned long bytes;
+	/* 32-bit process in 64-bit kernel. */
+	struct stack_frame_ia32 frame;
+	const void __user *fp;
 
 
-	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
+	if (!test_thread_flag(TIF_IA32))
+		return 0;
+
+	fp = compat_ptr(regs->bp);
+	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
+		frame.next_frame     = 0;
+		frame.return_address = 0;
+
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
+			break;
+
+		if (fp < compat_ptr(regs->sp))
+			break;
 
 
-	return bytes == sizeof(*frame);
+		callchain_store(entry, frame.return_address);
+		fp = compat_ptr(frame.next_frame);
+	}
+	return 1;
+}
+#else
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+    return 0;
 }
 }
+#endif
 
 
 static void
 static void
 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
@@ -1651,11 +1682,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, PERF_CONTEXT_USER);
 	callchain_store(entry, regs->ip);
 	callchain_store(entry, regs->ip);
 
 
+	if (perf_callchain_user32(regs, entry))
+		return;
+
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+		unsigned long bytes;
 		frame.next_frame	     = NULL;
 		frame.next_frame	     = NULL;
 		frame.return_address = 0;
 		frame.return_address = 0;
 
 
-		if (!copy_stack_frame(fp, &frame))
+		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (bytes != sizeof(frame))
 			break;
 			break;
 
 
 		if ((unsigned long)fp < regs->sp)
 		if ((unsigned long)fp < regs->sp)
@@ -1702,7 +1738,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return entry;
 	return entry;
 }
 }
 
 
-#ifdef CONFIG_EVENT_TRACING
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 {
 	regs->ip = ip;
 	regs->ip = ip;
@@ -1714,4 +1749,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
 	regs->cs = __KERNEL_CS;
 	regs->cs = __KERNEL_CS;
 	local_save_flags(regs->flags);
 	local_save_flags(regs->flags);
 }
 }
-#endif

+ 47 - 33
arch/x86/kernel/cpu/perf_event_amd.c

@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 	return (hwc->config & 0xe0) == 0xe0;
 }

+static inline int amd_has_nb(struct cpu_hw_events *cpuc)
+{
+	struct amd_nb *nb = cpuc->amd_nb;
+
+	return nb && nb->nb_id != -1;
+}
+
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 				      struct perf_event *event)
 {
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 	/*
 	 * only care about NB events
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return;

 	/*
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	/*
 	 * if not NB event or no NB, then no constraints
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return &unconstrained;

 	/*
@@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	return nb;
 }

-static void amd_pmu_cpu_online(int cpu)
+static int amd_pmu_cpu_prepare(int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+	WARN_ON_ONCE(cpuc->amd_nb);
+
+	if (boot_cpu_data.x86_max_cores < 2)
+		return NOTIFY_OK;
+
+	cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+	if (!cpuc->amd_nb)
+		return NOTIFY_BAD;
+
+	return NOTIFY_OK;
+}
+
+static void amd_pmu_cpu_starting(int cpu)
 {
-	struct cpu_hw_events *cpu1, *cpu2;
-	struct amd_nb *nb = NULL;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	struct amd_nb *nb;
 	int i, nb_id;

 	if (boot_cpu_data.x86_max_cores < 2)
 		return;

-	/*
-	 * function may be called too early in the
-	 * boot process, in which case nb_id is bogus
-	 */
 	nb_id = amd_get_nb_id(cpu);
-	if (nb_id == BAD_APICID)
-		return;
-
-	cpu1 = &per_cpu(cpu_hw_events, cpu);
-	cpu1->amd_nb = NULL;
+	WARN_ON_ONCE(nb_id == BAD_APICID);

 	raw_spin_lock(&amd_nb_lock);

 	for_each_online_cpu(i) {
-		cpu2 = &per_cpu(cpu_hw_events, i);
-		nb = cpu2->amd_nb;
-		if (!nb)
+		nb = per_cpu(cpu_hw_events, i).amd_nb;
+		if (WARN_ON_ONCE(!nb))
 			continue;
-		if (nb->nb_id == nb_id)
-			goto found;
-	}

-	nb = amd_alloc_nb(cpu, nb_id);
-	if (!nb) {
-		pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
-		raw_spin_unlock(&amd_nb_lock);
-		return;
+		if (nb->nb_id == nb_id) {
+			kfree(cpuc->amd_nb);
+			cpuc->amd_nb = nb;
+			break;
+		}
 	}
-found:
-	nb->refcnt++;
-	cpu1->amd_nb = nb;
+
+	cpuc->amd_nb->nb_id = nb_id;
+	cpuc->amd_nb->refcnt++;

 	raw_spin_unlock(&amd_nb_lock);
 }

-static void amd_pmu_cpu_offline(int cpu)
+static void amd_pmu_cpu_dead(int cpu)
 {
 	struct cpu_hw_events *cpuhw;

@@ -349,8 +360,10 @@ static void amd_pmu_cpu_offline(int cpu)
 	raw_spin_lock(&amd_nb_lock);

 	if (cpuhw->amd_nb) {
-		if (--cpuhw->amd_nb->refcnt == 0)
-			kfree(cpuhw->amd_nb);
+		struct amd_nb *nb = cpuhw->amd_nb;
+
+		if (nb->nb_id == -1 || --nb->refcnt == 0)
+			kfree(nb);

 		cpuhw->amd_nb = NULL;
 	}
@@ -379,8 +392,9 @@ static __initconst struct x86_pmu amd_pmu = {
 	.get_event_constraints	= amd_get_event_constraints,
 	.put_event_constraints	= amd_put_event_constraints,

-	.cpu_prepare		= amd_pmu_cpu_online,
-	.cpu_dead		= amd_pmu_cpu_offline,
+	.cpu_prepare		= amd_pmu_cpu_prepare,
+	.cpu_starting		= amd_pmu_cpu_starting,
+	.cpu_dead		= amd_pmu_cpu_dead,
 };

 static __init int amd_pmu_init(void)
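
The amd_pmu_cpu_prepare()/amd_pmu_cpu_starting() split above has each CPU allocate a placeholder amd_nb early (nb_id == -1) and, once the real node id is known, either adopt an already-registered structure for that node (freeing its own and bumping the shared refcount) or promote the placeholder. A small userspace sketch of that adopt-or-keep refcounting follows; the types are invented, and the kernel's per-CPU storage and locking are left out.

#include <stdio.h>
#include <stdlib.h>

struct nb { int nb_id; int refcnt; };

static struct nb *cpu_nb[4];               /* stand-in for per-CPU ->amd_nb */

static void cpu_starting(int cpu, int nb_id)
{
	struct nb *mine = cpu_nb[cpu];     /* allocated at prepare time, nb_id == -1 */

	for (int i = 0; i < 4; i++) {
		struct nb *other = cpu_nb[i];
		if (i != cpu && other && other->nb_id == nb_id) {
			free(mine);        /* adopt the node structure already registered */
			cpu_nb[cpu] = other;
			break;
		}
	}
	cpu_nb[cpu]->nb_id = nb_id;
	cpu_nb[cpu]->refcnt++;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++) {
		cpu_nb[cpu] = calloc(1, sizeof(struct nb));
		cpu_nb[cpu]->nb_id = -1;           /* placeholder, as in cpu_prepare */
		cpu_starting(cpu, cpu / 2);        /* pretend two cores per node */
	}
	printf("node 0 refcnt = %d\n", cpu_nb[0]->refcnt);   /* prints 2 */
	return 0;                                  /* freeing omitted for brevity */
}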

+ 5 - 0
arch/x86/kernel/dumpstack.h

@@ -30,6 +30,11 @@ struct stack_frame {
 	unsigned long return_address;
 };

+struct stack_frame_ia32 {
+    u32 next_frame;
+    u32 return_address;
+};
+
 static inline unsigned long rewind_frame_pointer(int n)
 {
 	struct stack_frame *frame;

+ 3 - 1
arch/x86/kernel/head32.c

@@ -7,6 +7,7 @@
 
 
 #include <linux/init.h>
 #include <linux/start_kernel.h>
+#include <linux/mm.h>

 #include <asm/setup.h>
 #include <asm/sections.h>
@@ -44,9 +45,10 @@ void __init i386_start_kernel(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+		/* Assume only end is not page aligned */
 		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 		u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-		u64 ramdisk_end   = ramdisk_image + ramdisk_size;
+		u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif

+ 2 - 1
arch/x86/kernel/head64.c

@@ -103,9 +103,10 @@ void __init x86_64_start_reservations(char *real_mode_data)
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Reserve INITRD */
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+		/* Assume only end is not page aligned */
 		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
 		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
-		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
+		unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 #endif

+ 22 - 0
arch/x86/kernel/irqinit.c

@@ -141,6 +141,28 @@ void __init init_IRQ(void)
 	x86_init.irqs.intr_init();
 }

+/*
+ * Setup the vector to irq mappings.
+ */
+void setup_vector_irq(int cpu)
+{
+#ifndef CONFIG_X86_IO_APIC
+	int irq;
+
+	/*
+	 * On most of the platforms, legacy PIC delivers the interrupts on the
+	 * boot cpu. But there are certain platforms where PIC interrupts are
+	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
+	 * legacy PIC, for the new cpu that is coming online, setup the static
+	 * legacy vector to irq mapping:
+	 */
+	for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
+		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
+#endif
+
+	__setup_vector_irq(cpu);
+}
+
 static void __init smp_intr_init(void)
 {
 #ifdef CONFIG_SMP
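
setup_vector_irq() above, when the kernel has no IO-APIC support, statically maps the legacy PIC vectors onto IRQs 0..nr_legacy_irqs-1 for a CPU that is coming online, and in either case finishes with __setup_vector_irq(). The static part is base-plus-offset arithmetic; a trivial sketch follows (the base vector value and local table are illustrative stand-ins for the per-CPU vector_irq[] array).

#include <stdio.h>

#define IRQ0_VECTOR      0x30      /* hypothetical base vector for IRQ 0 */
#define NR_LEGACY_IRQS   16
#define NR_VECTORS       256

int main(void)
{
	int vector_irq[NR_VECTORS];

	for (int v = 0; v < NR_VECTORS; v++)
		vector_irq[v] = -1;                   /* unassigned */

	/* static legacy vector -> irq mapping, as in the hunk above */
	for (int irq = 0; irq < NR_LEGACY_IRQS; irq++)
		vector_irq[IRQ0_VECTOR + irq] = irq;

	printf("vector 0x31 -> irq %d\n", vector_irq[0x31]);   /* prints 1 */
	return 0;
}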

+ 1 - 1
arch/x86/kernel/kgdb.c

@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
 	 * portion of kgdb because this operation requires mutexs to
 	 * complete.
 	 */
+	hw_breakpoint_init(&attr);
 	attr.bp_addr = (unsigned long)kgdb_arch_init;
-	attr.type = PERF_TYPE_BREAKPOINT;
 	attr.bp_len = HW_BREAKPOINT_LEN_1;
 	attr.bp_type = HW_BREAKPOINT_W;
 	attr.disabled = 1;

+ 24 - 8
arch/x86/kernel/process.c

@@ -526,21 +526,37 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 }

 /*
- * Check for AMD CPUs, which have potentially C1E support
+ * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
+ * For more information see
+ * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
+ * - Erratum #365 for family 0x11 (not affected because C1e not in use)
  */
 static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
 {
+	u64 val;
 	if (c->x86_vendor != X86_VENDOR_AMD)
-		return 0;
-
-	if (c->x86 < 0x0F)
-		return 0;
+		goto no_c1e_idle;

 	/* Family 0x0f models < rev F do not have C1E */
-	if (c->x86 == 0x0f && c->x86_model < 0x40)
-		return 0;
+	if (c->x86 == 0x0F && c->x86_model >= 0x40)
+		return 1;

-	return 1;
+	if (c->x86 == 0x10) {
+		/*
+		 * check OSVW bit for CPUs that are not affected
+		 * by erratum #400
+		 */
+		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
+		if (val >= 2) {
+			rdmsrl(MSR_AMD64_OSVW_STATUS, val);
+			if (!(val & BIT(1)))
+				goto no_c1e_idle;
+		}
+		return 1;
+	}
+
+no_c1e_idle:
+	return 0;
 }

 static cpumask_var_t c1e_mask;
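
check_c1e_idle() above now consults the OSVW ("OS Visible Workaround") MSRs on family 0x10: if the reported OSVW ID length covers bit 1 and that status bit is clear, erratum #400 does not apply and the function falls through to no_c1e_idle. A condensed sketch of just that decision, with rdmsrl() replaced by a stub returning canned values; the MSR numbers are quoted for reference and should be checked against msr-index.h.

#include <stdint.h>
#include <stdio.h>

#define MSR_AMD64_OSVW_ID_LENGTH  0xc0010140u
#define MSR_AMD64_OSVW_STATUS     0xc0010141u

/* Stub: pretend the CPU reports OSVW length 2 and erratum #400 not present. */
static uint64_t rdmsr_stub(uint32_t msr)
{
	return msr == MSR_AMD64_OSVW_ID_LENGTH ? 2 : 0;
}

static int family10_needs_c1e_workaround(void)
{
	uint64_t len = rdmsr_stub(MSR_AMD64_OSVW_ID_LENGTH);

	if (len >= 2) {
		uint64_t status = rdmsr_stub(MSR_AMD64_OSVW_STATUS);
		if (!(status & (1ull << 1)))
			return 0;     /* workaround visible and not needed */
	}
	return 1;                     /* otherwise assume affected, as the kernel does */
}

int main(void)
{
	printf("C1E workaround needed: %d\n", family10_needs_c1e_workaround());
	return 0;
}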

+ 6 - 4
arch/x86/kernel/setup.c

@@ -314,16 +314,17 @@ static void __init reserve_brk(void)
 #define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
 static void __init relocate_initrd(void)
 {
-
+	/* Assume only end is not page aligned */
 	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
+	u64 area_size     = PAGE_ALIGN(ramdisk_size);
 	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
 	u64 ramdisk_here;
 	unsigned long slop, clen, mapaddr;
 	char *p, *q;

 	/* We need to move the initrd down into lowmem */
-	ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size,
+	ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
 					 PAGE_SIZE);

 	if (ramdisk_here == -1ULL)
@@ -332,7 +333,7 @@ static void __init relocate_initrd(void)

 	/* Note: this includes all the lowmem currently occupied by
 	   the initrd, we rely on that fact to keep the data intact. */
-	reserve_early(ramdisk_here, ramdisk_here + ramdisk_size,
+	reserve_early(ramdisk_here, ramdisk_here + area_size,
 			 "NEW RAMDISK");
 	initrd_start = ramdisk_here + PAGE_OFFSET;
 	initrd_end   = initrd_start + ramdisk_size;
@@ -376,9 +377,10 @@ static void __init relocate_initrd(void)

 static void __init reserve_initrd(void)
 {
+	/* Assume only end is not page aligned */
 	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-	u64 ramdisk_end   = ramdisk_image + ramdisk_size;
+	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;

 	if (!boot_params.hdr.type_of_loader ||

+ 3 - 3
arch/x86/kernel/smpboot.c

@@ -242,12 +242,10 @@ static void __cpuinit smp_callin(void)
 	end_local_APIC_setup();
 	map_cpu_to_logical_apicid();

-	notify_cpu_starting(cpuid);
-
 	/*
 	 * Need to setup vector mappings before we enable interrupts.
 	 */
-	__setup_vector_irq(smp_processor_id());
+	setup_vector_irq(smp_processor_id());
 	/*
 	 * Get our bogomips.
 	 *
@@ -264,6 +262,8 @@ static void __cpuinit smp_callin(void)
 	 */
 	smp_store_cpu_info(cpuid);

+	notify_cpu_starting(cpuid);
+
 	/*
 	 * Allow the master to continue.
 	 */

+ 1 - 1
arch/x86/kernel/vmlinux.lds.S

@@ -291,8 +291,8 @@ SECTIONS
 	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
 		__smp_locks = .;
 		*(.smp_locks)
-		__smp_locks_end = .;
 		. = ALIGN(PAGE_SIZE);
+		__smp_locks_end = .;
 	}

 #ifdef CONFIG_X86_64

+ 26 - 6
arch/x86/mm/init.c

@@ -331,11 +331,23 @@ int devmem_is_allowed(unsigned long pagenr)
 
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr = begin;
+	unsigned long addr;
+	unsigned long begin_aligned, end_aligned;

-	if (addr >= end)
+	/* Make sure boundaries are page aligned */
+	begin_aligned = PAGE_ALIGN(begin);
+	end_aligned   = end & PAGE_MASK;
+
+	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
+		begin = begin_aligned;
+		end   = end_aligned;
+	}
+
+	if (begin >= end)
 		return;

+	addr = begin;
+
 	/*
 	 * If debugging page accesses then do not free this memory but
 	 * mark them not present - any buggy init-section access will
@@ -343,7 +355,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	 */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-		begin, PAGE_ALIGN(end));
+		begin, end);
 	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
 	/*
@@ -358,8 +370,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	for (; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)(addr & ~(PAGE_SIZE-1)),
-			POISON_FREE_INITMEM, PAGE_SIZE);
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -376,6 +387,15 @@ void free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_init_pages("initrd memory", start, end);
+	/*
+	 * end could be not aligned, and We can not align that,
+	 * decompresser could be confused by aligned initrd_end
+	 * We already reserve the end partial page before in
+	 *   - i386_start_kernel()
+	 *   - x86_64_start_kernel()
+	 *   - relocate_initrd()
+	 * So here We can do PAGE_ALIGN() safely to get partial page to be freed
+	 */
+	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
 }
 #endif
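
The initrd changes above (head32.c, head64.c, setup.c, init.c) follow one rule: reserve up to a page-aligned end at boot, then later free up to the same page-aligned end, so the trailing partial page is neither leaked nor handed out twice. The arithmetic involved is just PAGE_ALIGN(); a tiny standalone sketch with made-up addresses and a 4 KiB page size:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096ull
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uint64_t ramdisk_image = 0x37f2000;   /* page-aligned start */
	uint64_t ramdisk_size  = 0x12345;     /* end is NOT page aligned */
	uint64_t ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	printf("reserve %#llx..%#llx (%llu pages)\n",
	       (unsigned long long)ramdisk_image,
	       (unsigned long long)ramdisk_end,
	       (unsigned long long)((ramdisk_end - ramdisk_image) / PAGE_SIZE));
	return 0;
}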

+ 18 - 4
arch/x86/pci/acpi.c

@@ -122,8 +122,8 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 	struct acpi_resource_address64 addr;
 	acpi_status status;
 	unsigned long flags;
-	struct resource *root;
-	u64 start, end;
+	struct resource *root, *conflict;
+	u64 start, end, max_len;

 	status = resource_to_addr(acpi_res, &addr);
 	if (!ACPI_SUCCESS(status))
@@ -140,6 +140,17 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 	} else
 		return AE_OK;

+	max_len = addr.maximum - addr.minimum + 1;
+	if (addr.address_length > max_len) {
+		dev_printk(KERN_DEBUG, &info->bridge->dev,
+			   "host bridge window length %#llx doesn't fit in "
+			   "%#llx-%#llx, trimming\n",
+			   (unsigned long long) addr.address_length,
+			   (unsigned long long) addr.minimum,
+			   (unsigned long long) addr.maximum);
+		addr.address_length = max_len;
+	}
+
 	start = addr.minimum + addr.translation_offset;
 	end = start + addr.address_length - 1;

@@ -157,9 +168,12 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 		return AE_OK;
 	}

-	if (insert_resource(root, res)) {
+	conflict = insert_resource_conflict(root, res);
+	if (conflict) {
 		dev_err(&info->bridge->dev,
-			"can't allocate host bridge window %pR\n", res);
+			"address space collision: host bridge window %pR "
+			"conflicts with %s %pR\n",
+			res, conflict->name, conflict);
 	} else {
 		pci_bus_add_resource(info->bus, res, 0);
 		info->res_num++;
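
setup_resource() above now clamps a host bridge window whose declared length overruns its minimum..maximum range before inserting the resource, and reports the conflicting resource if insertion still fails. A userspace sketch of just the clamp, with a hypothetical stand-in for acpi_resource_address64:

#include <stdint.h>
#include <stdio.h>

struct addr_window {                /* stand-in for acpi_resource_address64 */
	uint64_t minimum;
	uint64_t maximum;
	uint64_t address_length;
};

static void trim_window(struct addr_window *w)
{
	uint64_t max_len = w->maximum - w->minimum + 1;

	if (w->address_length > max_len) {
		printf("window length %#llx doesn't fit in %#llx-%#llx, trimming\n",
		       (unsigned long long)w->address_length,
		       (unsigned long long)w->minimum,
		       (unsigned long long)w->maximum);
		w->address_length = max_len;
	}
}

int main(void)
{
	/* claims 512 MiB inside a 256 MiB decode range */
	struct addr_window w = { .minimum = 0xd0000000, .maximum = 0xdfffffff,
				 .address_length = 0x20000000 };

	trim_window(&w);
	printf("final length %#llx\n", (unsigned long long)w.address_length);
	return 0;
}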

+ 0 - 5
arch/x86/pci/i386.c

@@ -127,9 +127,6 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 					continue;
 					continue;
 				if (!r->start ||
 				if (!r->start ||
 				    pci_claim_resource(dev, idx) < 0) {
 				    pci_claim_resource(dev, idx) < 0) {
-					dev_info(&dev->dev,
-						 "can't reserve window %pR\n",
-						 r);
 					/*
 					/*
 					 * Something is wrong with the region.
 					 * Something is wrong with the region.
 					 * Invalidate the resource to prevent
 					 * Invalidate the resource to prevent
@@ -181,8 +178,6 @@ static void __init pcibios_allocate_resources(int pass)
 					"BAR %d: reserving %pr (d=%d, p=%d)\n",
 					"BAR %d: reserving %pr (d=%d, p=%d)\n",
 					idx, r, disabled, pass);
 					idx, r, disabled, pass);
 				if (pci_claim_resource(dev, idx) < 0) {
 				if (pci_claim_resource(dev, idx) < 0) {
-					dev_info(&dev->dev,
-						 "can't reserve %pR\n", r);
 					/* We'll assign a new address later */
 					/* We'll assign a new address later */
 					r->end -= r->start;
 					r->end -= r->start;
 					r->start = 0;
 					r->start = 0;

+ 4 - 0
drivers/ata/pata_via.c

@@ -576,6 +576,10 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 			u8 rev = isa->revision;
 			u8 rev = isa->revision;
 			pci_dev_put(isa);
 			pci_dev_put(isa);
 
 
+			if ((id->device == 0x0415 || id->device == 0x3164) &&
+			    (config->id != id->device))
+				continue;
+
 			if (rev >= config->rev_min && rev <= config->rev_max)
 			if (rev >= config->rev_min && rev <= config->rev_max)
 				break;
 				break;
 		}
 		}

+ 31 - 0
drivers/base/power/main.c

@@ -439,8 +439,23 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	if (dev->bus && dev->bus->pm) {
 	if (dev->bus && dev->bus->pm) {
 		pm_dev_dbg(dev, state, "EARLY ");
 		pm_dev_dbg(dev, state, "EARLY ");
 		error = pm_noirq_op(dev, dev->bus->pm, state);
 		error = pm_noirq_op(dev, dev->bus->pm, state);
+		if (error)
+			goto End;
 	}
 	}
 
 
+	if (dev->type && dev->type->pm) {
+		pm_dev_dbg(dev, state, "EARLY type ");
+		error = pm_noirq_op(dev, dev->type->pm, state);
+		if (error)
+			goto End;
+	}
+
+	if (dev->class && dev->class->pm) {
+		pm_dev_dbg(dev, state, "EARLY class ");
+		error = pm_noirq_op(dev, dev->class->pm, state);
+	}
+
+End:
 	TRACE_RESUME(error);
 	TRACE_RESUME(error);
 	return error;
 	return error;
 }
 }
@@ -735,10 +750,26 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
 {
 	int error = 0;
 	int error = 0;
 
 
+	if (dev->class && dev->class->pm) {
+		pm_dev_dbg(dev, state, "LATE class ");
+		error = pm_noirq_op(dev, dev->class->pm, state);
+		if (error)
+			goto End;
+	}
+
+	if (dev->type && dev->type->pm) {
+		pm_dev_dbg(dev, state, "LATE type ");
+		error = pm_noirq_op(dev, dev->type->pm, state);
+		if (error)
+			goto End;
+	}
+
 	if (dev->bus && dev->bus->pm) {
 	if (dev->bus && dev->bus->pm) {
 		pm_dev_dbg(dev, state, "LATE ");
 		pm_dev_dbg(dev, state, "LATE ");
 		error = pm_noirq_op(dev, dev->bus->pm, state);
 		error = pm_noirq_op(dev, dev->bus->pm, state);
 	}
 	}
+
+End:
 	return error;
 	return error;
 }
 }
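
The device_resume_noirq()/device_suspend_noirq() changes above run dev->type->pm and dev->class->pm alongside the existing bus callbacks, in mirrored order (bus, type, class on resume; class, type, bus on suspend), bailing out at the first error. A compact sketch of that mirrored-chain pattern with dummy ops; none of these names are the driver core's:

#include <stdio.h>

typedef int (*pm_cb)(void);

static int bus_op(void)   { printf("  bus\n");   return 0; }
static int type_op(void)  { printf("  type\n");  return 0; }
static int class_op(void) { printf("  class\n"); return 0; }

static int run_chain(pm_cb *ops, int n)
{
	for (int i = 0; i < n; i++) {
		int error = ops[i] ? ops[i]() : 0;  /* NULL means "no callback" */
		if (error)
			return error;               /* mirrors the goto End paths */
	}
	return 0;
}

int main(void)
{
	pm_cb resume_order[]  = { bus_op, type_op, class_op };
	pm_cb suspend_order[] = { class_op, type_op, bus_op };

	printf("resume_noirq:\n");
	run_chain(resume_order, 3);
	printf("suspend_noirq:\n");
	run_chain(suspend_order, 3);
	return 0;
}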
 
 

+ 2 - 0
drivers/char/tty_io.c

@@ -1423,6 +1423,8 @@ static void release_one_tty(struct work_struct *work)
 	list_del_init(&tty->tty_files);
 	list_del_init(&tty->tty_files);
 	file_list_unlock();
 	file_list_unlock();
 
 
+	put_pid(tty->pgrp);
+	put_pid(tty->session);
 	free_tty_struct(tty);
 	free_tty_struct(tty);
 }
 }
 
 

+ 40 - 63
drivers/firewire/core-device.c

@@ -126,97 +126,74 @@ int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
 }
 }
 EXPORT_SYMBOL(fw_csr_string);
 EXPORT_SYMBOL(fw_csr_string);
 
 
-static bool is_fw_unit(struct device *dev);
-
-static int match_unit_directory(const u32 *directory, u32 match_flags,
-				const struct ieee1394_device_id *id)
+static void get_ids(const u32 *directory, int *id)
 {
 {
 	struct fw_csr_iterator ci;
 	struct fw_csr_iterator ci;
-	int key, value, match;
+	int key, value;
 
 
-	match = 0;
 	fw_csr_iterator_init(&ci, directory);
 	fw_csr_iterator_init(&ci, directory);
 	while (fw_csr_iterator_next(&ci, &key, &value)) {
 	while (fw_csr_iterator_next(&ci, &key, &value)) {
-		if (key == CSR_VENDOR && value == id->vendor_id)
-			match |= IEEE1394_MATCH_VENDOR_ID;
-		if (key == CSR_MODEL && value == id->model_id)
-			match |= IEEE1394_MATCH_MODEL_ID;
-		if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
-			match |= IEEE1394_MATCH_SPECIFIER_ID;
-		if (key == CSR_VERSION && value == id->version)
-			match |= IEEE1394_MATCH_VERSION;
+		switch (key) {
+		case CSR_VENDOR:	id[0] = value; break;
+		case CSR_MODEL:		id[1] = value; break;
+		case CSR_SPECIFIER_ID:	id[2] = value; break;
+		case CSR_VERSION:	id[3] = value; break;
+		}
 	}
 	}
+}
+
+static void get_modalias_ids(struct fw_unit *unit, int *id)
+{
+	get_ids(&fw_parent_device(unit)->config_rom[5], id);
+	get_ids(unit->directory, id);
+}
+
+static bool match_ids(const struct ieee1394_device_id *id_table, int *id)
+{
+	int match = 0;
+
+	if (id[0] == id_table->vendor_id)
+		match |= IEEE1394_MATCH_VENDOR_ID;
+	if (id[1] == id_table->model_id)
+		match |= IEEE1394_MATCH_MODEL_ID;
+	if (id[2] == id_table->specifier_id)
+		match |= IEEE1394_MATCH_SPECIFIER_ID;
+	if (id[3] == id_table->version)
+		match |= IEEE1394_MATCH_VERSION;
 
 
-	return (match & match_flags) == match_flags;
+	return (match & id_table->match_flags) == id_table->match_flags;
 }
 }
 
 
+static bool is_fw_unit(struct device *dev);
+
 static int fw_unit_match(struct device *dev, struct device_driver *drv)
 static int fw_unit_match(struct device *dev, struct device_driver *drv)
 {
 {
-	struct fw_unit *unit = fw_unit(dev);
-	struct fw_device *device;
-	const struct ieee1394_device_id *id;
+	const struct ieee1394_device_id *id_table =
+			container_of(drv, struct fw_driver, driver)->id_table;
+	int id[] = {0, 0, 0, 0};
 
 
 	/* We only allow binding to fw_units. */
 	/* We only allow binding to fw_units. */
 	if (!is_fw_unit(dev))
 	if (!is_fw_unit(dev))
 		return 0;
 		return 0;
 
 
-	device = fw_parent_device(unit);
-	id = container_of(drv, struct fw_driver, driver)->id_table;
+	get_modalias_ids(fw_unit(dev), id);
 
 
-	for (; id->match_flags != 0; id++) {
-		if (match_unit_directory(unit->directory, id->match_flags, id))
+	for (; id_table->match_flags != 0; id_table++)
+		if (match_ids(id_table, id))
 			return 1;
 			return 1;
 
 
-		/* Also check vendor ID in the root directory. */
-		if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
-		    match_unit_directory(&device->config_rom[5],
-				IEEE1394_MATCH_VENDOR_ID, id) &&
-		    match_unit_directory(unit->directory, id->match_flags
-				& ~IEEE1394_MATCH_VENDOR_ID, id))
-			return 1;
-	}
-
 	return 0;
 	return 0;
 }
 }
 
 
 static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
 static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
 {
 {
-	struct fw_device *device = fw_parent_device(unit);
-	struct fw_csr_iterator ci;
+	int id[] = {0, 0, 0, 0};
 
 
-	int key, value;
-	int vendor = 0;
-	int model = 0;
-	int specifier_id = 0;
-	int version = 0;
-
-	fw_csr_iterator_init(&ci, &device->config_rom[5]);
-	while (fw_csr_iterator_next(&ci, &key, &value)) {
-		switch (key) {
-		case CSR_VENDOR:
-			vendor = value;
-			break;
-		case CSR_MODEL:
-			model = value;
-			break;
-		}
-	}
-
-	fw_csr_iterator_init(&ci, unit->directory);
-	while (fw_csr_iterator_next(&ci, &key, &value)) {
-		switch (key) {
-		case CSR_SPECIFIER_ID:
-			specifier_id = value;
-			break;
-		case CSR_VERSION:
-			version = value;
-			break;
-		}
-	}
+	get_modalias_ids(unit, id);
 
 
 	return snprintf(buffer, buffer_size,
 	return snprintf(buffer, buffer_size,
 			"ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
 			"ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
-			vendor, model, specifier_id, version);
+			id[0], id[1], id[2], id[3]);
 }
 }
 
 
 static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
 static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
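
The core-device.c rework above collects vendor/model/specifier/version into a four-entry id[] (root directory first, unit directory overriding it) and compares that against each driver id_table entry under the entry's match_flags. A sketch of the bitmask test; the flag values and the SBP-2-style numbers below are illustrative, not necessarily the exact IEEE1394_MATCH_* constants:

#include <stdio.h>

#define MATCH_VENDOR_ID    0x1
#define MATCH_MODEL_ID     0x2
#define MATCH_SPECIFIER_ID 0x4
#define MATCH_VERSION      0x8

struct device_id {
	unsigned int match_flags;
	int vendor_id, model_id, specifier_id, version;
};

static int match_ids(const struct device_id *t, const int id[4])
{
	unsigned int match = 0;

	if (id[0] == t->vendor_id)    match |= MATCH_VENDOR_ID;
	if (id[1] == t->model_id)     match |= MATCH_MODEL_ID;
	if (id[2] == t->specifier_id) match |= MATCH_SPECIFIER_ID;
	if (id[3] == t->version)      match |= MATCH_VERSION;

	/* only the fields the table entry cares about have to match */
	return (match & t->match_flags) == t->match_flags;
}

int main(void)
{
	struct device_id entry = { MATCH_SPECIFIER_ID | MATCH_VERSION,
				   0, 0, 0x00609e, 0x010483 };
	int id[4] = { 0x001234, 0x5678, 0x00609e, 0x010483 };

	printf("matches: %d\n", match_ids(&entry, id));   /* prints 1 */
	return 0;
}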

+ 3 - 2
drivers/firewire/core-iso.c

@@ -331,8 +331,9 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
 	if (ret < 0)
 	if (ret < 0)
 		*bandwidth = 0;
 		*bandwidth = 0;
 
 
-	if (allocate && ret < 0 && c >= 0) {
-		deallocate_channel(card, irm_id, generation, c, buffer);
+	if (allocate && ret < 0) {
+		if (c >= 0)
+			deallocate_channel(card, irm_id, generation, c, buffer);
 		*channel = ret;
 		*channel = ret;
 	}
 	}
 }
 }

+ 4 - 0
drivers/firewire/ohci.c

@@ -231,6 +231,8 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
 
+#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
+
 #define QUIRK_CYCLE_TIMER		1
 #define QUIRK_CYCLE_TIMER		1
 #define QUIRK_RESET_PACKET		2
 #define QUIRK_RESET_PACKET		2
 #define QUIRK_BE_HEADERS		4
 #define QUIRK_BE_HEADERS		4
@@ -239,6 +241,8 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
 static const struct {
 static const struct {
 	unsigned short vendor, device, flags;
 	unsigned short vendor, device, flags;
 } ohci_quirks[] = {
 } ohci_quirks[] = {
+	{PCI_VENDOR_ID_TI,	PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
+							    QUIRK_RESET_PACKET},
 	{PCI_VENDOR_ID_TI,	PCI_ANY_ID,	QUIRK_RESET_PACKET},
 	{PCI_VENDOR_ID_TI,	PCI_ANY_ID,	QUIRK_RESET_PACKET},
 	{PCI_VENDOR_ID_AL,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
 	{PCI_VENDOR_ID_AL,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
 	{PCI_VENDOR_ID_NEC,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
 	{PCI_VENDOR_ID_NEC,	PCI_ANY_ID,	QUIRK_CYCLE_TIMER},
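
The ohci_quirks[] addition above lists the device-specific TSB12LV22 entry ahead of the TI wildcard, so a first-match lookup picks the more specific flags. A sketch of that kind of vendor/device table lookup with a PCI_ANY_ID-style wildcard; the numeric IDs and the lookup helper are illustrative only:

#include <stdio.h>

#define ANY_ID             0xffffu
#define QUIRK_CYCLE_TIMER  1
#define QUIRK_RESET_PACKET 2

static const struct { unsigned short vendor, device, flags; } quirks[] = {
	{ 0x104c /* TI  */, 0x8009 /* TSB12LV22 */, QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET },
	{ 0x104c /* TI  */, ANY_ID,                 QUIRK_RESET_PACKET },
	{ 0x1033 /* NEC */, ANY_ID,                 QUIRK_CYCLE_TIMER },
};

static unsigned int lookup_quirks(unsigned short vendor, unsigned short device)
{
	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].vendor == vendor &&
		    (quirks[i].device == device || quirks[i].device == ANY_ID))
			return quirks[i].flags;   /* first (most specific) entry wins */
	return 0;
}

int main(void)
{
	printf("TSB12LV22 quirks = %#x\n", lookup_quirks(0x104c, 0x8009));
	printf("other TI quirks  = %#x\n", lookup_quirks(0x104c, 0x8024));
	return 0;
}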

+ 1 - 0
drivers/gpu/drm/drm_crtc_helper.c

@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	if (connector->status == connector_status_disconnected) {
 	if (connector->status == connector_status_disconnected) {
 		DRM_DEBUG_KMS("%s is disconnected\n",
 		DRM_DEBUG_KMS("%s is disconnected\n",
 			  drm_get_connector_name(connector));
 			  drm_get_connector_name(connector));
+		drm_mode_connector_update_edid_property(connector, NULL);
 		goto prune;
 		goto prune;
 	}
 	}
 
 

+ 0 - 9
drivers/gpu/drm/drm_edid.c

@@ -707,15 +707,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
 	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
 	mode->vtotal = mode->vdisplay + vblank;
 	mode->vtotal = mode->vdisplay + vblank;
 
 
-	/* perform the basic check for the detailed timing */
-	if (mode->hsync_end > mode->htotal ||
-		mode->vsync_end > mode->vtotal) {
-		drm_mode_destroy(dev, mode);
-		DRM_DEBUG_KMS("Incorrect detailed timing. "
-				"Sync is beyond the blank.\n");
-		return NULL;
-	}
-
 	/* Some EDIDs have bogus h/vtotal values */
 	/* Some EDIDs have bogus h/vtotal values */
 	if (mode->hsync_end > mode->htotal)
 	if (mode->hsync_end > mode->htotal)
 		mode->htotal = mode->hsync_end + 1;
 		mode->htotal = mode->hsync_end + 1;

+ 2 - 0
drivers/gpu/drm/drm_fb_helper.c

@@ -283,6 +283,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
 	.help_msg = "force-fb(V)",
 	.help_msg = "force-fb(V)",
 	.action_msg = "Restore framebuffer console",
 	.action_msg = "Restore framebuffer console",
 };
 };
+#else
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
 #endif
 #endif
 
 
 static void drm_fb_helper_on(struct fb_info *info)
 static void drm_fb_helper_on(struct fb_info *info)

+ 9 - 7
drivers/gpu/drm/drm_fops.c

@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp)
 		spin_unlock(&dev->count_lock);
 		spin_unlock(&dev->count_lock);
 	}
 	}
 out:
 out:
-	mutex_lock(&dev->struct_mutex);
-	if (minor->type == DRM_MINOR_LEGACY) {
-		BUG_ON((dev->dev_mapping != NULL) &&
-			(dev->dev_mapping != inode->i_mapping));
-		if (dev->dev_mapping == NULL)
-			dev->dev_mapping = inode->i_mapping;
+	if (!retcode) {
+		mutex_lock(&dev->struct_mutex);
+		if (minor->type == DRM_MINOR_LEGACY) {
+			if (dev->dev_mapping == NULL)
+				dev->dev_mapping = inode->i_mapping;
+			else if (dev->dev_mapping != inode->i_mapping)
+				retcode = -ENODEV;
+		}
+		mutex_unlock(&dev->struct_mutex);
 	}
 	}
-	mutex_unlock(&dev->struct_mutex);
 
 
 	return retcode;
 	return retcode;
 }
 }

+ 1 - 1
drivers/gpu/drm/nouveau/Makefile

@@ -12,7 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_dp.o nouveau_grctx.o \
              nouveau_dp.o nouveau_grctx.o \
              nv04_timer.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
-             nv04_fb.o nv10_fb.o nv40_fb.o \
+             nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o \
              nv40_graph.o nv50_graph.o \

+ 26 - 2
drivers/gpu/drm/nouveau/nouveau_bios.c

@@ -5210,6 +5210,21 @@ divine_connector_type(struct nvbios *bios, int index)
 	return type;
 	return type;
 }
 }
 
 
+static void
+apply_dcb_connector_quirks(struct nvbios *bios, int idx)
+{
+	struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
+	struct drm_device *dev = bios->dev;
+
+	/* Gigabyte NX85T */
+	if ((dev->pdev->device == 0x0421) &&
+	    (dev->pdev->subsystem_vendor == 0x1458) &&
+	    (dev->pdev->subsystem_device == 0x344c)) {
+		if (cte->type == DCB_CONNECTOR_HDMI_1)
+			cte->type = DCB_CONNECTOR_DVI_I;
+	}
+}
+
 static void
 static void
 parse_dcb_connector_table(struct nvbios *bios)
 parse_dcb_connector_table(struct nvbios *bios)
 {
 {
@@ -5238,13 +5253,14 @@ parse_dcb_connector_table(struct nvbios *bios)
 	entry = conntab + conntab[1];
 	entry = conntab + conntab[1];
 	cte = &ct->entry[0];
 	cte = &ct->entry[0];
 	for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
 	for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
+		cte->index = i;
 		if (conntab[3] == 2)
 		if (conntab[3] == 2)
 			cte->entry = ROM16(entry[0]);
 			cte->entry = ROM16(entry[0]);
 		else
 		else
 			cte->entry = ROM32(entry[0]);
 			cte->entry = ROM32(entry[0]);
 
 
 		cte->type  = (cte->entry & 0x000000ff) >> 0;
 		cte->type  = (cte->entry & 0x000000ff) >> 0;
-		cte->index = (cte->entry & 0x00000f00) >> 8;
+		cte->index2 = (cte->entry & 0x00000f00) >> 8;
 		switch (cte->entry & 0x00033000) {
 		switch (cte->entry & 0x00033000) {
 		case 0x00001000:
 		case 0x00001000:
 			cte->gpio_tag = 0x07;
 			cte->gpio_tag = 0x07;
@@ -5266,6 +5282,8 @@ parse_dcb_connector_table(struct nvbios *bios)
 		if (cte->type == 0xff)
 		if (cte->type == 0xff)
 			continue;
 			continue;
 
 
+		apply_dcb_connector_quirks(bios, i);
+
 		NV_INFO(dev, "  %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
 		NV_INFO(dev, "  %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
 			i, cte->entry, cte->type, cte->index, cte->gpio_tag);
 			i, cte->entry, cte->type, cte->index, cte->gpio_tag);
 
 
@@ -5287,10 +5305,16 @@ parse_dcb_connector_table(struct nvbios *bios)
 			break;
 			break;
 		default:
 		default:
 			cte->type = divine_connector_type(bios, cte->index);
 			cte->type = divine_connector_type(bios, cte->index);
-			NV_WARN(dev, "unknown type, using 0x%02x", cte->type);
+			NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
 			break;
 			break;
 		}
 		}
 
 
+		if (nouveau_override_conntype) {
+			int type = divine_connector_type(bios, cte->index);
+			if (type != cte->type)
+				NV_WARN(dev, " -> type 0x%02x\n", cte->type);
+		}
+
 	}
 	}
 }
 }
 
 

+ 2 - 1
drivers/gpu/drm/nouveau/nouveau_bios.h

@@ -72,9 +72,10 @@ enum dcb_connector_type {
 };
 };
 
 
 struct dcb_connector_table_entry {
 struct dcb_connector_table_entry {
+	uint8_t index;
 	uint32_t entry;
 	uint32_t entry;
 	enum dcb_connector_type type;
 	enum dcb_connector_type type;
-	uint8_t index;
+	uint8_t index2;
 	uint8_t gpio_tag;
 	uint8_t gpio_tag;
 };
 };
 
 

+ 1 - 2
drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -439,8 +439,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 
 	switch (bo->mem.mem_type) {
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
 	case TTM_PL_VRAM:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
-					 TTM_PL_FLAG_SYSTEM);
+		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT);
 		break;
 		break;
 	default:
 	default:
 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_connector.c

@@ -302,7 +302,7 @@ nouveau_connector_detect(struct drm_connector *connector)
 
 
 detect_analog:
 detect_analog:
 	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
 	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
-	if (!nv_encoder)
+	if (!nv_encoder && !nouveau_tv_disable)
 		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
 		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
 	if (nv_encoder) {
 	if (nv_encoder) {
 		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
 		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);

+ 5 - 0
drivers/gpu/drm/nouveau/nouveau_dma.c

@@ -190,6 +190,11 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
 	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
 
 
 	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
 	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
+
+	DRM_MEMORYBARRIER();
+	/* Flush writes. */
+	nouveau_bo_rd32(pb, 0);
+
 	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
 	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
 	chan->dma.ib_free--;
 	chan->dma.ib_free--;
 }
 }

+ 10 - 0
drivers/gpu/drm/nouveau/nouveau_drv.c

@@ -83,6 +83,14 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
 int nouveau_nofbaccel = 0;
 int nouveau_nofbaccel = 0;
 module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
 module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
 
 
+MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
+int nouveau_override_conntype = 0;
+module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
+
+MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
+int nouveau_tv_disable = 0;
+module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
+
 MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
 MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
 		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
 		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
 		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
 		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -154,9 +162,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	if (pm_state.event == PM_EVENT_PRETHAW)
 	if (pm_state.event == PM_EVENT_PRETHAW)
 		return 0;
 		return 0;
 
 
+	NV_INFO(dev, "Disabling fbcon acceleration...\n");
 	fbdev_flags = dev_priv->fbdev_info->flags;
 	fbdev_flags = dev_priv->fbdev_info->flags;
 	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
 	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
 
 
+	NV_INFO(dev, "Unpinning framebuffer(s)...\n");
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_framebuffer *nouveau_fb;
 		struct nouveau_framebuffer *nouveau_fb;
 
 

+ 6 - 0
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -681,6 +681,7 @@ extern int nouveau_uscript_tmds;
 extern int nouveau_vram_pushbuf;
 extern int nouveau_vram_pushbuf;
 extern int nouveau_vram_notify;
 extern int nouveau_vram_notify;
 extern int nouveau_fbpercrtc;
 extern int nouveau_fbpercrtc;
+extern int nouveau_tv_disable;
 extern char *nouveau_tv_norm;
 extern char *nouveau_tv_norm;
 extern int nouveau_reg_debug;
 extern int nouveau_reg_debug;
 extern char *nouveau_vbios;
 extern char *nouveau_vbios;
@@ -688,6 +689,7 @@ extern int nouveau_ctxfw;
 extern int nouveau_ignorelid;
 extern int nouveau_ignorelid;
 extern int nouveau_nofbaccel;
 extern int nouveau_nofbaccel;
 extern int nouveau_noaccel;
 extern int nouveau_noaccel;
+extern int nouveau_override_conntype;
 
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -926,6 +928,10 @@ extern void nv40_fb_takedown(struct drm_device *);
 extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
 extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
 				      uint32_t, uint32_t);
 				      uint32_t, uint32_t);
 
 
+/* nv50_fb.c */
+extern int  nv50_fb_init(struct drm_device *);
+extern void nv50_fb_takedown(struct drm_device *);
+
 /* nv04_fifo.c */
 /* nv04_fifo.c */
 extern int  nv04_fifo_init(struct drm_device *);
 extern int  nv04_fifo_init(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);

+ 561 - 48
drivers/gpu/drm/nouveau/nouveau_irq.c

@@ -311,6 +311,31 @@ nouveau_print_bitfield_names_(uint32_t value,
 #define nouveau_print_bitfield_names(val, namelist) \
 #define nouveau_print_bitfield_names(val, namelist) \
 	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
 	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
 
 
+struct nouveau_enum_names {
+	uint32_t value;
+	const char *name;
+};
+
+static void
+nouveau_print_enum_names_(uint32_t value,
+				const struct nouveau_enum_names *namelist,
+				const int namelist_len)
+{
+	/*
+	 * Caller must have already printed the KERN_* log level for us.
+	 * Also the caller is responsible for adding the newline.
+	 */
+	int i;
+	for (i = 0; i < namelist_len; ++i) {
+		if (value == namelist[i].value) {
+			printk("%s", namelist[i].name);
+			return;
+		}
+	}
+	printk("unknown value 0x%08x", value);
+}
+#define nouveau_print_enum_names(val, namelist) \
+	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
 
 
 static int
 static int
 nouveau_graph_chid_from_grctx(struct drm_device *dev)
 nouveau_graph_chid_from_grctx(struct drm_device *dev)
@@ -427,14 +452,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
 	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
 
 
-	NV_INFO(dev, "%s - nSource:", id);
-	nouveau_print_bitfield_names(nsource, nsource_names);
-	printk(", nStatus:");
-	if (dev_priv->card_type < NV_10)
-		nouveau_print_bitfield_names(nstatus, nstatus_names);
-	else
-		nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
-	printk("\n");
+	if (dev_priv->card_type < NV_50) {
+		NV_INFO(dev, "%s - nSource:", id);
+		nouveau_print_bitfield_names(nsource, nsource_names);
+		printk(", nStatus:");
+		if (dev_priv->card_type < NV_10)
+			nouveau_print_bitfield_names(nstatus, nstatus_names);
+		else
+			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
+		printk("\n");
+	}
 
 
 	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
 	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
 					"Data 0x%08x:0x%08x\n",
 					"Data 0x%08x:0x%08x\n",
@@ -577,28 +604,503 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
 	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
 	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
 }
 }
 
 
+static void
+nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t trap[6];
+	int i, ch;
+	uint32_t idx = nv_rd32(dev, 0x100c90);
+	if (idx & 0x80000000) {
+		idx &= 0xffffff;
+		if (display) {
+			for (i = 0; i < 6; i++) {
+				nv_wr32(dev, 0x100c90, idx | i << 24);
+				trap[i] = nv_rd32(dev, 0x100c94);
+			}
+			for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+				struct nouveau_channel *chan = dev_priv->fifos[ch];
+
+				if (!chan || !chan->ramin)
+					continue;
+
+				if (trap[1] == chan->ramin->instance >> 12)
+					break;
+			}
+			NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
+					name, (trap[5]&0x100?"read":"write"),
+					trap[5]&0xff, trap[4]&0xffff,
+					trap[3]&0xffff, trap[0], trap[2], ch);
+		}
+		nv_wr32(dev, 0x100c90, idx | 0x80000000);
+	} else if (display) {
+		NV_INFO(dev, "%s - no VM fault?\n", name);
+	}
+}
+
+static struct nouveau_enum_names nv50_mp_exec_error_names[] =
+{
+	{ 3, "STACK_UNDERFLOW" },
+	{ 4, "QUADON_ACTIVE" },
+	{ 8, "TIMEOUT" },
+	{ 0x10, "INVALID_OPCODE" },
+	{ 0x40, "BREAKPOINT" },
+};
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	uint32_t addr, mp10, status, pc, oplow, ophigh;
+	int i;
+	int mps = 0;
+	for (i = 0; i < 4; i++) {
+		if (!(units & 1 << (i+24)))
+			continue;
+		if (dev_priv->chipset < 0xa0)
+			addr = 0x408200 + (tpid << 12) + (i << 7);
+		else
+			addr = 0x408100 + (tpid << 11) + (i << 7);
+		mp10 = nv_rd32(dev, addr + 0x10);
+		status = nv_rd32(dev, addr + 0x14);
+		if (!status)
+			continue;
+		if (display) {
+			nv_rd32(dev, addr + 0x20);
+			pc = nv_rd32(dev, addr + 0x24);
+			oplow = nv_rd32(dev, addr + 0x70);
+			ophigh= nv_rd32(dev, addr + 0x74);
+			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+					"TP %d MP %d: ", tpid, i);
+			nouveau_print_enum_names(status,
+					nv50_mp_exec_error_names);
+			printk(" at %06x warp %d, opcode %08x %08x\n",
+					pc&0xffffff, pc >> 24,
+					oplow, ophigh);
+		}
+		nv_wr32(dev, addr + 0x10, mp10);
+		nv_wr32(dev, addr + 0x14, 0);
+		mps++;
+	}
+	if (!mps && display)
+		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+				"No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+		uint32_t ustatus_new, int display, const char *name)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int tps = 0;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	int i, r;
+	uint32_t ustatus_addr, ustatus;
+	for (i = 0; i < 16; i++) {
+		if (!(units & (1 << i)))
+			continue;
+		if (dev_priv->chipset < 0xa0)
+			ustatus_addr = ustatus_old + (i << 12);
+		else
+			ustatus_addr = ustatus_new + (i << 11);
+		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+		if (!ustatus)
+			continue;
+		tps++;
+		switch (type) {
+		case 6: /* texture error... unknown for now */
+			nv50_pfb_vm_trap(dev, display, name);
+			if (display) {
+				NV_ERROR(dev, "magic set %d:\n", i);
+				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+						nv_rd32(dev, r));
+			}
+			break;
+		case 7: /* MP error */
+			if (ustatus & 0x00010000) {
+				nv50_pgraph_mp_trap(dev, i, display);
+				ustatus &= ~0x00010000;
+			}
+			break;
+		case 8: /* TPDMA error */
+			{
+			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+			nv50_pfb_vm_trap(dev, display, name);
+			/* 2d engine destination */
+			if (ustatus & 0x00000010) {
+				if (display) {
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000010;
+			}
+			/* Render target */
+			if (ustatus & 0x00000040) {
+				if (display) {
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000040;
+			}
+			/* CUDA memory: l[], g[] or stack. */
+			if (ustatus & 0x00000080) {
+				if (display) {
+					if (e18 & 0x80000000) {
+						/* g[] read fault? */
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 24) & 0x1f));
+						e18 &= ~0x1f000000;
+					} else if (e18 & 0xc) {
+						/* g[] write fault? */
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 7) & 0x1f));
+						e18 &= ~0x00000f80;
+					} else {
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+								i, e14, e10);
+					}
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000080;
+			}
+			}
+			break;
+		}
+		if (ustatus) {
+			if (display)
+				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+		}
+		nv_wr32(dev, ustatus_addr, 0xc0000000);
+	}
+
+	if (!tps && display)
+		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
+
+static void
+nv50_pgraph_trap_handler(struct drm_device *dev)
+{
+	struct nouveau_pgraph_trap trap;
+	uint32_t status = nv_rd32(dev, 0x400108);
+	uint32_t ustatus;
+	int display = nouveau_ratelimit();
+
+
+	if (!status && display) {
+		nouveau_graph_trap_info(dev, &trap);
+		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
+		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
+	}
+
+	/* DISPATCH: Relays commands to other units and handles NOTIFY,
+	 * COND, QUERY. If you get a trap from it, the command is still stuck
+	 * in DISPATCH and you need to do something about it. */
+	if (status & 0x001) {
+		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+		}
+
+		/* Known to be triggered by screwed up NOTIFY and COND... */
+		if (ustatus & 0x00000001) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
+			nv_wr32(dev, 0x400500, 0);
+			if (nv_rd32(dev, 0x400808) & 0x80000000) {
+				if (display) {
+					if (nouveau_graph_trapped_channel(dev, &trap.channel))
+						trap.channel = -1;
+					trap.class = nv_rd32(dev, 0x400814);
+					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
+					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
+					trap.data = nv_rd32(dev, 0x40080c);
+					trap.data2 = nv_rd32(dev, 0x400810);
+					nouveau_graph_dump_trap_info(dev,
+							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
+					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
+					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
+				}
+				nv_wr32(dev, 0x400808, 0);
+			} else if (display) {
+				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
+			}
+			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+			nv_wr32(dev, 0x400848, 0);
+			ustatus &= ~0x00000001;
+		}
+		if (ustatus & 0x00000002) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
+			nv_wr32(dev, 0x400500, 0);
+			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
+				if (display) {
+					if (nouveau_graph_trapped_channel(dev, &trap.channel))
+						trap.channel = -1;
+					trap.class = nv_rd32(dev, 0x400814);
+					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
+					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
+					trap.data = nv_rd32(dev, 0x40085c);
+					trap.data2 = 0;
+					nouveau_graph_dump_trap_info(dev,
+							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
+					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
+				}
+				nv_wr32(dev, 0x40084c, 0);
+			} else if (display) {
+				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
+			}
+			ustatus &= ~0x00000002;
+		}
+		if (ustatus && display)
+			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x400804, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x001);
+		status &= ~0x001;
+	}
+
+	/* TRAPs other than dispatch use the "normal" trap regs. */
+	if (status && display) {
+		nouveau_graph_trap_info(dev, &trap);
+		nouveau_graph_dump_trap_info(dev,
+				"PGRAPH_TRAP", &trap);
+	}
+
+	/* M2MF: Memory to memory copy engine. */
+	if (status & 0x002) {
+		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
+		}
+		if (ustatus & 0x00000001) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
+			ustatus &= ~0x00000001;
+		}
+		if (ustatus & 0x00000002) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
+			ustatus &= ~0x00000002;
+		}
+		if (ustatus & 0x00000004) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
+			ustatus &= ~0x00000004;
+		}
+		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x406804),
+				nv_rd32(dev, 0x406808),
+				nv_rd32(dev, 0x40680c),
+				nv_rd32(dev, 0x406810));
+		if (ustatus && display)
+			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(dev, 0x400040, 2);
+		nv_wr32(dev, 0x400040, 0);
+		nv_wr32(dev, 0x406800, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x002);
+		status &= ~0x002;
+	}
+
+	/* VFETCH: Fetches data from vertex buffers. */
+	if (status & 0x004) {
+		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
+		}
+		if (ustatus & 0x00000001) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
+			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
+					nv_rd32(dev, 0x400c00),
+					nv_rd32(dev, 0x400c08),
+					nv_rd32(dev, 0x400c0c),
+					nv_rd32(dev, 0x400c10));
+			ustatus &= ~0x00000001;
+		}
+		if (ustatus && display)
+			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x400c04, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x004);
+		status &= ~0x004;
+	}
+
+	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+	if (status & 0x008) {
+		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
+		}
+		if (ustatus & 0x00000001) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
+			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
+					nv_rd32(dev, 0x401804),
+					nv_rd32(dev, 0x401808),
+					nv_rd32(dev, 0x40180c),
+					nv_rd32(dev, 0x401810));
+			ustatus &= ~0x00000001;
+		}
+		if (ustatus && display)
+			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(dev, 0x400040, 0x80);
+		nv_wr32(dev, 0x400040, 0);
+		nv_wr32(dev, 0x401800, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x008);
+		status &= ~0x008;
+	}
+
+	/* CCACHE: Handles code and c[] caches and fills them. */
+	if (status & 0x010) {
+		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
+		}
+		if (ustatus & 0x00000001) {
+			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
+			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
+					nv_rd32(dev, 0x405800),
+					nv_rd32(dev, 0x405804),
+					nv_rd32(dev, 0x405808),
+					nv_rd32(dev, 0x40580c),
+					nv_rd32(dev, 0x405810),
+					nv_rd32(dev, 0x405814),
+					nv_rd32(dev, 0x40581c));
+			ustatus &= ~0x00000001;
+		}
+		if (ustatus && display)
+			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x405018, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x010);
+		status &= ~0x010;
+	}
+
+	/* Unknown, not seen yet... 0x402000 is the only trap status reg
+	 * remaining, so try to handle it anyway. Perhaps related to that
+	 * unknown DMA slot on tesla? */
+	if (status & 0x20) {
+		nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
+		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+		if (display)
+			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x402000, 0xc0000000);
+		/* no status modifiction on purpose */
+	}
+
+	/* TEXTURE: CUDA texturing units */
+	if (status & 0x040) {
+		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
+				"PGRAPH_TRAP_TEXTURE");
+		nv_wr32(dev, 0x400108, 0x040);
+		status &= ~0x040;
+	}
+
+	/* MP: CUDA execution engines. */
+	if (status & 0x080) {
+		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
+				"PGRAPH_TRAP_MP");
+		nv_wr32(dev, 0x400108, 0x080);
+		status &= ~0x080;
+	}
+
+	/* TPDMA:  Handles TP-initiated uncached memory accesses:
+	 * l[], g[], stack, 2d surfaces, render targets. */
+	if (status & 0x100) {
+		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
+				"PGRAPH_TRAP_TPDMA");
+		nv_wr32(dev, 0x400108, 0x100);
+		status &= ~0x100;
+	}
+
+	if (status) {
+		if (display)
+			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
+				status);
+		nv_wr32(dev, 0x400108, status);
+	}
+}
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+static struct nouveau_enum_names nv50_data_error_names[] =
+{
+	{ 4,	"INVALID_VALUE" },
+	{ 5,	"INVALID_ENUM" },
+	{ 8,	"INVALID_OBJECT" },
+	{ 0xc,	"INVALID_BITFIELD" },
+	{ 0x28,	"MP_NO_REG_SPACE" },
+	{ 0x2b,	"MP_BLOCK_SIZE_MISMATCH" },
+};
+
 static void
 static void
 nv50_pgraph_irq_handler(struct drm_device *dev)
 nv50_pgraph_irq_handler(struct drm_device *dev)
 {
 {
+	struct nouveau_pgraph_trap trap;
+	int unhandled = 0;
 	uint32_t status;
 	uint32_t status;
 
 
 	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
 	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
+		/* NOTIFY: You've set a NOTIFY an a command and it's done. */
 		if (status & 0x00000001) {
 		if (status & 0x00000001) {
-			nouveau_pgraph_intr_notify(dev, nsource);
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_NOTIFY", &trap);
 			status &= ~0x00000001;
 			status &= ~0x00000001;
 			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
 			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
 		}
 		}
 
 
-		if (status & 0x00000010) {
-			nouveau_pgraph_intr_error(dev, nsource |
-					NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
+		 * when you write 0x200 to 0x50c0 method 0x31c. */
+		if (status & 0x00000002) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_COMPUTE_QUERY", &trap);
+			status &= ~0x00000002;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
+		}
 
 
+		/* Unknown, never seen: 0x4 */
+
+		/* ILLEGAL_MTHD: You used a wrong method for this class. */
+		if (status & 0x00000010) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_pgraph_intr_swmthd(dev, &trap))
+				unhandled = 1;
+			if (unhandled && nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_ILLEGAL_MTHD", &trap);
 			status &= ~0x00000010;
 			status &= ~0x00000010;
 			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
 			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
 		}
 		}
 
 
+		/* ILLEGAL_CLASS: You used a wrong class. */
+		if (status & 0x00000020) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_ILLEGAL_CLASS", &trap);
+			status &= ~0x00000020;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
+		}
+
+		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
+		if (status & 0x00000040) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_DOUBLE_NOTIFY", &trap);
+			status &= ~0x00000040;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
+		}
+
+		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
 		if (status & 0x00001000) {
 			nv_wr32(dev, 0x400500, 0x00000000);
 			nv_wr32(dev, NV03_PGRAPH_INTR,
@@ -613,49 +1115,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
 			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
 		}
 
-		if (status & 0x00100000) {
-			nouveau_pgraph_intr_error(dev, nsource |
-					NV03_PGRAPH_NSOURCE_DATA_ERROR);
+		/* BUFFER_NOTIFY: Your m2mf transfer finished */
+		if (status & 0x00010000) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_BUFFER_NOTIFY", &trap);
+			status &= ~0x00010000;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
+		}
 
+		/* DATA_ERROR: Invalid value for this method, or invalid
+		 * state in current PGRAPH context for this operation */
+		if (status & 0x00100000) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit()) {
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_DATA_ERROR", &trap);
+				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
+				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
+						nv50_data_error_names);
+				printk("\n");
+			}
 			status &= ~0x00100000;
 			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
 		}
 
+		/* TRAP: Something bad happened in the middle of command
+		 * execution.  Has a billion types, subtypes, and even
+		 * subsubtypes. */
 		if (status & 0x00200000) {
-			int r;
-
-			nouveau_pgraph_intr_error(dev, nsource |
-					NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
-
-			NV_ERROR(dev, "magic set 1:\n");
-			for (r = 0x408900; r <= 0x408910; r += 4)
-				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-					nv_rd32(dev, r));
-			nv_wr32(dev, 0x408900,
-				nv_rd32(dev, 0x408904) | 0xc0000000);
-			for (r = 0x408e08; r <= 0x408e24; r += 4)
-				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-							nv_rd32(dev, r));
-			nv_wr32(dev, 0x408e08,
-				nv_rd32(dev, 0x408e08) | 0xc0000000);
-
-			NV_ERROR(dev, "magic set 2:\n");
-			for (r = 0x409900; r <= 0x409910; r += 4)
-				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-					nv_rd32(dev, r));
-			nv_wr32(dev, 0x409900,
-				nv_rd32(dev, 0x409904) | 0xc0000000);
-			for (r = 0x409e08; r <= 0x409e24; r += 4)
-				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-					nv_rd32(dev, r));
-			nv_wr32(dev, 0x409e08,
-				nv_rd32(dev, 0x409e08) | 0xc0000000);
-
+			nv50_pgraph_trap_handler(dev);
 			status &= ~0x00200000;
-			nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
 			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
 		}
 
+		/* Unknown, never seen: 0x00400000 */
+
+		/* SINGLE_STEP: Happens on every method if you turned on
+		 * single stepping in 40008c */
+		if (status & 0x01000000) {
+			nouveau_graph_trap_info(dev, &trap);
+			if (nouveau_ratelimit())
+				nouveau_graph_dump_trap_info(dev,
+						"PGRAPH_SINGLE_STEP", &trap);
+			status &= ~0x01000000;
+			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
+		}
+
+		/* 0x02000000 happens when you pause a ctxprog...
+		 * but the only way this can happen that I know is by
+		 * poking the relevant MMIO register, and we don't
+		 * do that. */
+
 		if (status) {
 			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
 				status);
@@ -672,7 +1184,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
 	}
 
 	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+	if (nv_rd32(dev, 0x400824) & (1 << 31))
+		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
 }
 
 static void

+ 2 - 3
drivers/gpu/drm/nouveau/nouveau_state.c

@@ -35,7 +35,6 @@
 #include "nouveau_drm.h"
 #include "nv50_display.h"
 
-static int nouveau_stub_init(struct drm_device *dev) { return 0; }
 static void nouveau_stub_takedown(struct drm_device *dev) {}
 
 static int nouveau_init_engine_ptrs(struct drm_device *dev)
@@ -277,8 +276,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.init		= nv04_timer_init;
 		engine->timer.read		= nv04_timer_read;
 		engine->timer.takedown		= nv04_timer_takedown;
-		engine->fb.init			= nouveau_stub_init;
-		engine->fb.takedown		= nouveau_stub_takedown;
+		engine->fb.init			= nv50_fb_init;
+		engine->fb.takedown		= nv50_fb_takedown;
 		engine->graph.grclass		= nv50_graph_grclass;
 		engine->graph.init		= nv50_graph_init;
 		engine->graph.takedown		= nv50_graph_takedown;

+ 3 - 3
drivers/gpu/drm/nouveau/nv04_crtc.c

@@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	struct drm_framebuffer *fb = crtc->fb;
 
 	/* Calculate our timings */
-	int horizDisplay	= (mode->crtc_hdisplay >> 3) 	- 1;
-	int horizStart		= (mode->crtc_hsync_start >> 3) 	- 1;
-	int horizEnd		= (mode->crtc_hsync_end >> 3) 	- 1;
+	int horizDisplay	= (mode->crtc_hdisplay >> 3)		- 1;
+	int horizStart		= (mode->crtc_hsync_start >> 3) 	+ 1;
+	int horizEnd		= (mode->crtc_hsync_end >> 3)		+ 1;
 	int horizTotal		= (mode->crtc_htotal >> 3)		- 5;
 	int horizBlankStart	= (mode->crtc_hdisplay >> 3)		- 1;
 	int horizBlankEnd	= (mode->crtc_htotal >> 3)		- 1;

+ 3 - 3
drivers/gpu/drm/nouveau/nv04_fbcon.c

@@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		return;
 	}
 
-	width = ALIGN(image->width, 32);
-	dsize = (width * image->height) >> 5;
+	width = ALIGN(image->width, 8);
+	dsize = ALIGN(width * image->height, 32) >> 5;
 
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 			 ((image->dx + image->width) & 0xffff));
 	OUT_RING(chan, bg);
 	OUT_RING(chan, fg);
-	OUT_RING(chan, (image->height << 16) | image->width);
 	OUT_RING(chan, (image->height << 16) | width);
+	OUT_RING(chan, (image->height << 16) | image->width);
 	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
 
 	while (dsize) {

+ 2 - 2
drivers/gpu/drm/nouveau/nv50_display.c

@@ -522,8 +522,8 @@ int nv50_display_create(struct drm_device *dev)
 	}
 
 	for (i = 0 ; i < dcb->connector.entries; i++) {
-		if (i != 0 && dcb->connector.entry[i].index ==
-			      dcb->connector.entry[i - 1].index)
+		if (i != 0 && dcb->connector.entry[i].index2 ==
+			      dcb->connector.entry[i - 1].index2)
 			continue;
 		nouveau_connector_create(dev, &dcb->connector.entry[i]);
 	}

Some files were not shown because too many files changed in this diff